From c625060f0c7984a694365145d1f5061726a522ea Mon Sep 17 00:00:00 2001
From: Peng Fan
Date: Mon, 19 Aug 2024 10:48:53 +0800
Subject: [PATCH] LoongArch: Sync to .vec.39

Signed-off-by: Peng Fan
---
 Add-elf-support.patch | 73 +
 ...iv16qi-template-for-2x128bit-grouped.patch | 99 +
 ...n-using-mrecip-div-which-leads-to-Sp.patch | 34 +
 ...register-when-expand-conditional-mov.patch | 29 +
 ...bit-floating-point-built-in-function.patch | 51 +
 ...ternate-__intN__-form-of-__intN-type.patch | 283 +
 ...sts-for-SX-and-ASX-vector-instructio.patch | 336371 +++++++++++++++
 LoongArch-Define-macro-CLEAR_INSN_CACHE.patch | 36 +
 ...sn-output-of-vec_concat-templates-fo.patch | 135 +
 LoongArch-Implement-option-save-restore.patch | 317 +
 ...ent-su-sadv16qi-and-su-sadv32qi-stan.patch | 102 +
 ...Optimizations-of-vector-construction.patch | 1199 +
 ...h-Remove-bash-syntax-from-config.gcc.patch | 140 +
 ...X-for-scalar-FP-rounding-with-explic.patch | 106 +
 ...mplify_gen_subreg-instead-of-gen_rtx.patch | 180 +
 LoongArch-add-gnat-ada-compiler-support.patch | 382 +
 ...Arch-enable-__builtin_thread_pointer.patch | 38 +
 LoongArch-support-static-pie.patch | 30 +
 ...E_RATIO-1-using-4-consecutive-scalar.patch | 61 +
 Optimize-float-vector-unpack-operation.patch | 253 +
 ...lementation-of-multiplication-operat.patch | 125 +
 ..._stack_protection-Use-full-sized-mas.patch | 48 +
 ...-the-movti-movtf-templates-and-fix-t.patch | 95 +
 ...-headers-to-lib-gcc-loongarch64-linu.patch | 32 +
 gcc-8.3.0-Fix-bug-for-simpley.patch | 83 +
 ...g-when-using-mrecip-rsqrt-which-casu.patch | 82 +
 ...-the-bug-in-loongarch_emit_stack_tie.patch | 34 +
 ...the-priority-of-registers-t3-through.patch | 49 +
 ...e-model-fsched-pressure-algorithm-by.patch | 31 +
 gcc.spec | 65 +-
 libffi-Add-loongarch-support.patch | 881 +
 31 files changed, 341443 insertions(+), 1 deletion(-)
 create mode 100644 Add-elf-support.patch
 create mode 100644 Add-vec_initv32qiv16qi-template-for-2x128bit-grouped.patch
 create mode 100644 Fix-accuracy-when-using-mrecip-div-which-leads-to-Sp.patch
 create mode 100644 Fix-emit-target-register-when-expand-conditional-mov.patch
 create mode 100644 Implement-128-bit-floating-point-built-in-function.patch
 create mode 100644 Implement-alternate-__intN__-form-of-__intN-type.patch
 create mode 100644 LoongArch-Add-tests-for-SX-and-ASX-vector-instructio.patch
 create mode 100644 LoongArch-Define-macro-CLEAR_INSN_CACHE.patch
 create mode 100644 LoongArch-Fix-insn-output-of-vec_concat-templates-fo.patch
 create mode 100644 LoongArch-Implement-option-save-restore.patch
 create mode 100644 LoongArch-Implement-su-sadv16qi-and-su-sadv32qi-stan.patch
 create mode 100644 LoongArch-Optimizations-of-vector-construction.patch
 create mode 100644 LoongArch-Remove-bash-syntax-from-config.gcc.patch
 create mode 100644 LoongArch-Use-LSX-for-scalar-FP-rounding-with-explic.patch
 create mode 100644 LoongArch-Use-simplify_gen_subreg-instead-of-gen_rtx.patch
 create mode 100644 LoongArch-add-gnat-ada-compiler-support.patch
 create mode 100644 LoongArch-enable-__builtin_thread_pointer.patch
 create mode 100644 LoongArch-support-static-pie.patch
 create mode 100644 Modify-MOVE_RATIO-1-using-4-consecutive-scalar.patch
 create mode 100644 Optimize-float-vector-unpack-operation.patch
 create mode 100644 Optimize-the-implementation-of-multiplication-operat.patch
 create mode 100644 asan.c-asan_emit_stack_protection-Use-full-sized-mas.patch
 create mode 100644 gcc-8.3.0-Delete-the-movti-movtf-templates-and-fix-t.patch
 create mode 100644 gcc-8.3.0-Export-headers-to-lib-gcc-loongarch64-linu.patch
 create mode 100644 gcc-8.3.0-Fix-bug-for-simpley.patch
 create mode 100644 gcc-8.3.0-Fix-bug-when-using-mrecip-rsqrt-which-casu.patch
 create mode 100644 gcc-8.3.0-Fix-the-bug-in-loongarch_emit_stack_tie.patch
 create mode 100644 gcc-8.3.0-Lower-the-priority-of-registers-t3-through.patch
 create mode 100644 gcc-8.3.0-Use-the-model-fsched-pressure-algorithm-by.patch
 create mode 100644 libffi-Add-loongarch-support.patch

diff --git a/Add-elf-support.patch b/Add-elf-support.patch
new file mode 100644
index 0000000..5427784
--- /dev/null
+++ b/Add-elf-support.patch
@@ -0,0 +1,73 @@
+From 77c1070c35e4e08a5f4d437656cf1b6eb0699577 Mon Sep 17 00:00:00 2001
+From: Peng Fan
+Date: Mon, 12 Aug 2024 03:24:04 +0000
+Subject: [PATCH 02/30] Add elf support
+
+Fix I07198947184ad930182e46787a06e7cbb226b251
+---
+ libgcc/config.host | 42 +++++++++++++++++++-----------------------
+ 1 file changed, 19 insertions(+), 23 deletions(-)
+
+diff --git a/libgcc/config.host b/libgcc/config.host
+index 83ca131aa..5808c4b14 100644
+--- a/libgcc/config.host
++++ b/libgcc/config.host
+@@ -923,16 +923,16 @@ mips*-*-linux*) # Linux MIPS, either endian.
+ esac
+ md_unwind_header=mips/linux-unwind.h
+ ;;
+-loongarch*-*-linux*) # Linux MIPS, either endian.
+- extra_parts="$extra_parts crtfastmath.o"
+- tmake_file="${tmake_file} t-crtfm loongarch/t-crtstuff"
+- case ${host} in
+- *)
+- tmake_file="${tmake_file} t-slibgcc-libgcc"
+- ;;
+- esac
+- md_unwind_header=loongarch/linux-unwind.h
+- ;;
++loongarch*-*-linux*) # Linux MIPS, either endian.
++ extra_parts="$extra_parts crtfastmath.o"
++ tmake_file="${tmake_file} t-crtfm loongarch/t-crtstuff"
++ case ${host} in
++ *)
++ tmake_file="${tmake_file} t-slibgcc-libgcc"
++ ;;
++ esac
++ md_unwind_header=loongarch/linux-unwind.h
++ ;;
+ mips*-sde-elf*)
+ tmake_file="$tmake_file mips/t-crtstuff mips/t-mips16"
+ case "${with_newlib}" in
+@@ -946,19 +946,15 @@ mips*-sde-elf*)
+ esac
+ extra_parts="$extra_parts crti.o crtn.o"
+ ;;
+-loongarch*-sde-elf*)
+- tmake_file="$tmake_file loongarch/t-crtstuff"
+- case "${with_newlib}" in
+- yes)
+- # newlib / libgloss.
+- ;;
+- *)
+- # MIPS toolkit libraries.
+- tmake_file="$tmake_file loongarch/t-sdemtk"
+- ;;
+- esac
+- extra_parts="$extra_parts crti.o crtn.o"
+- ;;
++loongarch64-*-elf*)
++ extra_parts="$extra_parts crtfastmath.o"
++ tmake_file="${tmake_file} t-crtfm loongarch/t-crtstuff"
++ case ${host} in
++ *)
++ tmake_file="${tmake_file} t-slibgcc-libgcc"
++ ;;
++ esac
++ ;;
+ mipsisa32-*-elf* | mipsisa32el-*-elf* | \
+ mipsisa32r2-*-elf* | mipsisa32r2el-*-elf* | \
+ mipsisa32r6-*-elf* | mipsisa32r6el-*-elf* | \
+--
+2.43.5
+
diff --git a/Add-vec_initv32qiv16qi-template-for-2x128bit-grouped.patch b/Add-vec_initv32qiv16qi-template-for-2x128bit-grouped.patch
new file mode 100644
index 0000000..2678a78
--- /dev/null
+++ b/Add-vec_initv32qiv16qi-template-for-2x128bit-grouped.patch
@@ -0,0 +1,99 @@
+From 057d88b4cd22c1ac5d7590580e0c4702fc57cbac Mon Sep 17 00:00:00 2001
+From: Peng Fan
+Date: Mon, 12 Aug 2024 06:55:48 +0000
+Subject: [PATCH 03/30] Add vec_initv32qiv16qi template for 2x128bit-grouped
+ vector init operation.
+ +Signed-off-by: Peng Fan +--- + gcc/config/loongarch/lasx.md | 9 ++++++++ + gcc/config/loongarch/loongarch-protos.h | 1 + + gcc/config/loongarch/loongarch.c | 9 ++++++++ + .../gcc.target/loongarch/vec_initv32qiv16qi.c | 23 +++++++++++++++++++ + 4 files changed, 42 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vec_initv32qiv16qi.c + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index 515336e05..01a21bd6c 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -519,6 +519,15 @@ + DONE; + }) + ++(define_expand "vec_initv32qiv16qi" ++ [(match_operand:V32QI 0 "register_operand") ++ (match_operand:V16QI 1 "")] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vector_group_init (operands[0], operands[1]); ++ DONE; ++}) ++ + ;; FIXME: Delete. + (define_insn "vec_pack_trunc_" + [(set (match_operand: 0 "register_operand" "=f") +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 498d80514..08a9e8dc2 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -163,6 +163,7 @@ union loongarch_gen_fn_ptrs + extern void loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs, + rtx, rtx, rtx, rtx, rtx); + ++extern void loongarch_expand_vector_group_init (rtx, rtx); + extern void loongarch_expand_vector_init (rtx, rtx); + extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool); + extern void loongarch_expand_vec_perm (rtx, rtx, rtx, rtx); +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index 8a1337b96..1fd27d99e 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -9693,6 +9693,15 @@ loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val) + + /* Expand a vector initialization. */ + ++void ++loongarch_expand_vector_group_init (rtx target, rtx vals) ++{ ++ machine_mode mode = GET_MODE (target); ++ rtx ops[2] = { XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1) }; ++ emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (E_V32QImode, ops[0], ++ ops[1]))); ++} ++ + void + loongarch_expand_vector_init (rtx target, rtx vals) + { +diff --git a/gcc/testsuite/gcc.target/loongarch/vec_initv32qiv16qi.c b/gcc/testsuite/gcc.target/loongarch/vec_initv32qiv16qi.c +new file mode 100644 +index 000000000..bc1ca7a08 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vec_initv32qiv16qi.c +@@ -0,0 +1,23 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mlasx -O3 -ftree-vectorize -fdump-tree-vect-details" } */ ++ ++typedef unsigned char uint8_t; ++ ++int ++test_func (uint8_t *pix1, int i_stride_pix1, ++ uint8_t *pix2, int i_stride_pix2) ++{ ++ int i_sum = 0; ++ for (int y = 0; y < 16; y++) ++ { ++ for (int x = 0; x < 16; x++) ++ { ++ i_sum += __builtin_abs (pix1[x] - pix2[x]); ++ } ++ pix1 += i_stride_pix1; ++ pix2 += i_stride_pix2; ++ } ++ return i_sum; ++} ++ ++/* { dg-final { scan-tree-dump "vect_cst__438 = {_442, _440}" "vect" } } */ +-- +2.43.5 + diff --git a/Fix-accuracy-when-using-mrecip-div-which-leads-to-Sp.patch b/Fix-accuracy-when-using-mrecip-div-which-leads-to-Sp.patch new file mode 100644 index 0000000..d41eee5 --- /dev/null +++ b/Fix-accuracy-when-using-mrecip-div-which-leads-to-Sp.patch @@ -0,0 +1,34 @@ +From dee18a01673455bd0f7a3389ae4ef1cffddcc4fc Mon Sep 17 00:00:00 2001 +From: Guo Jie +Date: Sun, 25 Jun 2023 13:16:23 +0800 +Subject: [PATCH 04/30] Fix accuracy when using -mrecip=div, which leads to + Spec2017-538 compare error. 
+ +Change-Id: I3286bd507ea9a1a171956890979cd08e4b6685ff +--- + gcc/config/loongarch/loongarch.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index 1fd27d99e..4fdec9d6c 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -10698,11 +10698,11 @@ void loongarch_emit_swdivsf (rtx res, rtx a, rtx b, machine_mode mode) + /* 2.0 - b * x0; */ + emit_insn (gen_rtx_SET (e0, gen_rtx_FMA (mode,gen_rtx_NEG(mode, b), x0, mtwo))); + +- /* x1 = x0 * e0 */ +- emit_insn (gen_rtx_SET (x1, gen_rtx_MULT (mode, x0, e0))); ++ /* x1 = a * x0 */ ++ emit_insn (gen_rtx_SET (x1, gen_rtx_MULT (mode, a, x0))); + +- /* res = a * x1 */ +- emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, a, x1))); ++ /* res = e0 * x1 */ ++ emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, e0, x1))); + } + + /* LoongArch only implements preld hint=0 (prefetch for load) and hint=8 +-- +2.43.5 + diff --git a/Fix-emit-target-register-when-expand-conditional-mov.patch b/Fix-emit-target-register-when-expand-conditional-mov.patch new file mode 100644 index 0000000..e741026 --- /dev/null +++ b/Fix-emit-target-register-when-expand-conditional-mov.patch @@ -0,0 +1,29 @@ +From d6ba18bc9192912fb69f3c7303ccc5c699d44360 Mon Sep 17 00:00:00 2001 +From: Jinyang He +Date: Wed, 15 Nov 2023 15:52:13 +0800 +Subject: [PATCH 23/30] Fix emit target register when expand conditional move + +Change-Id: Iaced456c31ee8541042a602c5eef27292733e8b6 +--- + gcc/config/loongarch/loongarch.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index cd0a7f4ee..3cae3cc9b 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -4906,9 +4906,10 @@ loongarch_expand_conditional_move_la464 (rtx *operands) + if(FLOAT_MODE_P(sel_mode)){ + rtx target = gen_reg_rtx (GET_MODE (op0)); + bool invert = false; +- loongarch_emit_int_order_test (LTU, NULL, op0, ++ loongarch_emit_int_order_test (LTU, NULL, target, + force_reg (GET_MODE (op0), const0_rtx), + op0); ++ op0 = target; + op1 = const0_rtx; + } + } +-- +2.43.5 + diff --git a/Implement-128-bit-floating-point-built-in-function.patch b/Implement-128-bit-floating-point-built-in-function.patch new file mode 100644 index 0000000..28d0eba --- /dev/null +++ b/Implement-128-bit-floating-point-built-in-function.patch @@ -0,0 +1,51 @@ +From ebc3eeb8aba7138207f3fb6bae39e12e8c0864dc Mon Sep 17 00:00:00 2001 +From: chenxiaolong +Date: Wed, 6 Sep 2023 18:01:56 +0800 +Subject: [PATCH 18/30] Implement 128-bit floating point built-in function + +Change-Id: Id968d6d8e16e2e963e79b964ee5aa30fcad88cb5 +--- + gcc/config/loongarch/loongarch-builtins.c | 5 +++++ + gcc/config/loongarch/loongarch-c.c | 10 ++++++++++ + 2 files changed, 15 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch-builtins.c b/gcc/config/loongarch/loongarch-builtins.c +index b326ec46c..8c89eb908 100644 +--- a/gcc/config/loongarch/loongarch-builtins.c ++++ b/gcc/config/loongarch/loongarch-builtins.c +@@ -2463,6 +2463,11 @@ loongarch_init_builtins (void) + unsigned int i; + tree type; + ++ /* Register the type long_double_type_node as a built-in type and ++ give it an alias "__float128". */ ++ (*lang_hooks.types.register_builtin_type) (long_double_type_node, ++ "__float128"); ++ + /* Iterate through all of the bdesc arrays, initializing all of the + builtin functions. 
*/ + for (i = 0; i < ARRAY_SIZE (loongarch_builtins); i++) +diff --git a/gcc/config/loongarch/loongarch-c.c b/gcc/config/loongarch/loongarch-c.c +index f8583f7aa..a9e909157 100644 +--- a/gcc/config/loongarch/loongarch-c.c ++++ b/gcc/config/loongarch/loongarch-c.c +@@ -116,6 +116,16 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile) + builtin_define ("__loongarch_simd_width=256"); + } + ++ /* Add support for FLOAT128_TYPE on the LoongArch architecture. */ ++ builtin_define ("__FLOAT128_TYPE__"); ++ ++ /* Map the old _Float128 'q' builtins into the new 'f128' builtins. */ ++ builtin_define ("__builtin_fabsq=__builtin_fabsf128"); ++ builtin_define ("__builtin_copysignq=__builtin_copysignf128"); ++ builtin_define ("__builtin_nanq=__builtin_nanf128"); ++ builtin_define ("__builtin_nansq=__builtin_nansf128"); ++ builtin_define ("__builtin_infq=__builtin_inff128"); ++ builtin_define ("__builtin_huge_valq=__builtin_huge_valf128"); + + /* Native Data Sizes. */ + builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE); +-- +2.43.5 + diff --git a/Implement-alternate-__intN__-form-of-__intN-type.patch b/Implement-alternate-__intN__-form-of-__intN-type.patch new file mode 100644 index 0000000..644a3c8 --- /dev/null +++ b/Implement-alternate-__intN__-form-of-__intN-type.patch @@ -0,0 +1,283 @@ +From 7990ee5aec84c8dbca48cf908643d720c880b0ac Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 07:40:13 +0000 +Subject: [PATCH 29/30] Implement alternate __intN__ form of __intN type + +Signed-off-by: Peng Fan +--- + gcc/brig/brig-lang.c | 6 ++++-- + gcc/c-family/c-common.c | 6 ++++++ + gcc/c/c-decl.c | 6 +++++- + gcc/c/c-parser.c | 5 +++++ + gcc/config/msp430/msp430.h | 6 ++++-- + gcc/cp/cp-tree.h | 3 +++ + gcc/cp/decl.c | 6 +++++- + gcc/cp/lex.c | 5 +++++ + gcc/cp/parser.c | 6 ++++++ + gcc/gimple-ssa-sprintf.c | 6 ++++-- + gcc/lto/lto-lang.c | 6 ++++-- + gcc/stor-layout.c | 6 ++++-- + gcc/tree.c | 13 +++++++++---- + 13 files changed, 64 insertions(+), 16 deletions(-) + +diff --git a/gcc/brig/brig-lang.c b/gcc/brig/brig-lang.c +index 997dad419..604107623 100644 +--- a/gcc/brig/brig-lang.c ++++ b/gcc/brig/brig-lang.c +@@ -860,10 +860,12 @@ brig_build_c_type_nodes (void) + for (i = 0; i < NUM_INT_N_ENTS; i++) + if (int_n_enabled_p[i]) + { +- char name[50]; ++ char name[50], altname[50]; + sprintf (name, "__int%d unsigned", int_n_data[i].bitsize); ++ sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize); + +- if (strcmp (name, SIZE_TYPE) == 0) ++ if (strcmp (name, SIZE_TYPE) == 0 ++ || strcmp (altname, SIZE_TYPE) == 0) + { + intmax_type_node = int_n_trees[i].signed_type; + uintmax_type_node = int_n_trees[i].unsigned_type; +diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c +index d68987c48..a4a279367 100644 +--- a/gcc/c-family/c-common.c ++++ b/gcc/c-family/c-common.c +@@ -3979,8 +3979,14 @@ c_common_nodes_and_builtins (void) + sprintf (name, "__int%d", int_n_data[i].bitsize); + record_builtin_type ((enum rid)(RID_FIRST_INT_N + i), name, + int_n_trees[i].signed_type); ++ sprintf (name, "__int%d__", int_n_data[i].bitsize); ++ record_builtin_type ((enum rid)(RID_FIRST_INT_N + i), name, ++ int_n_trees[i].signed_type); ++ + sprintf (name, "__int%d unsigned", int_n_data[i].bitsize); + record_builtin_type (RID_MAX, name, int_n_trees[i].unsigned_type); ++ sprintf (name, "__int%d__ unsigned", int_n_data[i].bitsize); ++ record_builtin_type (RID_MAX, name, int_n_trees[i].unsigned_type); + } + + if (c_dialect_cxx ()) +diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c +index 
9bf20e3e4..22c953025 100644 +--- a/gcc/c/c-decl.c ++++ b/gcc/c/c-decl.c +@@ -10471,7 +10471,11 @@ declspecs_add_type (location_t loc, struct c_declspecs *specs, + case RID_INT_N_2: + case RID_INT_N_3: + specs->int_n_idx = i - RID_INT_N_0; +- if (!in_system_header_at (input_location)) ++ if (!in_system_header_at (input_location) ++ /* If the INT_N type ends in "__", and so is of the format ++ "__intN__", don't pedwarn. */ ++ && (strncmp (IDENTIFIER_POINTER (type) ++ + (IDENTIFIER_LENGTH (type) - 2), "__", 2) != 0)) + pedwarn (loc, OPT_Wpedantic, + "ISO C does not support %<__int%d%> types", + int_n_data[specs->int_n_idx].bitsize); +diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c +index b41e5eb8b..8a1d1968d 100644 +--- a/gcc/c/c-parser.c ++++ b/gcc/c/c-parser.c +@@ -156,6 +156,11 @@ c_parse_init (void) + id = get_identifier (name); + C_SET_RID_CODE (id, RID_FIRST_INT_N + i); + C_IS_RESERVED_WORD (id) = 1; ++ ++ sprintf (name, "__int%d__", int_n_data[i].bitsize); ++ id = get_identifier (name); ++ C_SET_RID_CODE (id, RID_FIRST_INT_N + i); ++ C_IS_RESERVED_WORD (id) = 1; + } + } + +diff --git a/gcc/config/msp430/msp430.h b/gcc/config/msp430/msp430.h +index 6bfe28c2f..e3a41eb04 100644 +--- a/gcc/config/msp430/msp430.h ++++ b/gcc/config/msp430/msp430.h +@@ -180,9 +180,11 @@ extern const char * msp430_select_hwmult_lib (int, const char **); + /* Layout of Source Language Data Types */ + + #undef SIZE_TYPE +-#define SIZE_TYPE (TARGET_LARGE ? "__int20 unsigned" : "unsigned int") ++#define SIZE_TYPE (TARGET_LARGE \ ++ ? "__int20__ unsigned" \ ++ : "unsigned int") + #undef PTRDIFF_TYPE +-#define PTRDIFF_TYPE (TARGET_LARGE ? "__int20" : "int") ++#define PTRDIFF_TYPE (TARGET_LARGE ? "__int20__" : "int") + #undef WCHAR_TYPE + #define WCHAR_TYPE "long int" + #undef WCHAR_TYPE_SIZE +diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h +index 1456c8ef5..a795f9dc9 100644 +--- a/gcc/cp/cp-tree.h ++++ b/gcc/cp/cp-tree.h +@@ -5764,6 +5764,9 @@ struct cp_decl_specifier_seq { + BOOL_BITFIELD gnu_thread_keyword_p : 1; + /* True iff the type is a decltype. */ + BOOL_BITFIELD decltype_p : 1; ++ /* True iff the alternate "__intN__" form of the __intN type has been ++ used. */ ++ BOOL_BITFIELD int_n_alt: 1; + }; + + /* The various kinds of declarators. */ +diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c +index 12adcbd7a..ff95f7ff3 100644 +--- a/gcc/cp/decl.c ++++ b/gcc/cp/decl.c +@@ -10204,6 +10204,7 @@ grokdeclarator (const cp_declarator *declarator, + tree type = NULL_TREE; + int longlong = 0; + int explicit_intN = 0; ++ int int_n_alt = 0; + int virtualp, explicitp, friendp, inlinep, staticp; + int explicit_int = 0; + int explicit_char = 0; +@@ -10277,6 +10278,7 @@ grokdeclarator (const cp_declarator *declarator, + long_p = decl_spec_seq_has_spec_p (declspecs, ds_long); + longlong = decl_spec_seq_has_spec_p (declspecs, ds_long_long); + explicit_intN = declspecs->explicit_intN_p; ++ int_n_alt = declspecs->int_n_alt; + thread_p = decl_spec_seq_has_spec_p (declspecs, ds_thread); + + // Was concept_p specified? Note that ds_concept +@@ -10677,7 +10679,9 @@ grokdeclarator (const cp_declarator *declarator, + int_n_data[declspecs->int_n_idx].bitsize); + explicit_intN = false; + } +- else if (pedantic && ! in_system_header_at (input_location)) ++ /* Don't pedwarn if the alternate "__intN__" form has been used instead ++ of "__intN". */ ++ else if (!int_n_alt && pedantic && ! 
in_system_header_at (input_location)) + pedwarn (input_location, OPT_Wpedantic, + "ISO C++ does not support %<__int%d%> for %qs", + int_n_data[declspecs->int_n_idx].bitsize, name); +diff --git a/gcc/cp/lex.c b/gcc/cp/lex.c +index 4d30eb5c8..d5ee63ecc 100644 +--- a/gcc/cp/lex.c ++++ b/gcc/cp/lex.c +@@ -256,6 +256,11 @@ init_reswords (void) + id = get_identifier (name); + C_SET_RID_CODE (id, RID_FIRST_INT_N + i); + set_identifier_kind (id, cik_keyword); ++ ++ sprintf (name, "__int%d__", int_n_data[i].bitsize); ++ id = get_identifier (name); ++ C_SET_RID_CODE (id, RID_FIRST_INT_N + i); ++ set_identifier_kind (id, cik_keyword); + } + } + +diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c +index c8c867a3f..4b60e9570 100644 +--- a/gcc/cp/parser.c ++++ b/gcc/cp/parser.c +@@ -17047,6 +17047,12 @@ cp_parser_simple_type_specifier (cp_parser* parser, + { + decl_specs->explicit_intN_p = true; + decl_specs->int_n_idx = idx; ++ /* Check if the alternate "__intN__" form has been used instead of ++ "__intN". */ ++ if (strncmp (IDENTIFIER_POINTER (token->u.value) ++ + (IDENTIFIER_LENGTH (token->u.value) - 2), ++ "__", 2) == 0) ++ decl_specs->int_n_alt = true; + } + type = int_n_trees [idx].signed_type; + break; +diff --git a/gcc/gimple-ssa-sprintf.c b/gcc/gimple-ssa-sprintf.c +index 613b3fe1e..1fc568784 100644 +--- a/gcc/gimple-ssa-sprintf.c ++++ b/gcc/gimple-ssa-sprintf.c +@@ -1115,10 +1115,12 @@ build_intmax_type_nodes (tree *pintmax, tree *puintmax) + for (int i = 0; i < NUM_INT_N_ENTS; i++) + if (int_n_enabled_p[i]) + { +- char name[50]; ++ char name[50], altname[50]; + sprintf (name, "__int%d unsigned", int_n_data[i].bitsize); ++ sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize); + +- if (strcmp (name, UINTMAX_TYPE) == 0) ++ if (strcmp (name, UINTMAX_TYPE) == 0 ++ || strcmp (altname, UINTMAX_TYPE) == 0) + { + *pintmax = int_n_trees[i].signed_type; + *puintmax = int_n_trees[i].unsigned_type; +diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c +index a310d699a..859c307af 100644 +--- a/gcc/lto/lto-lang.c ++++ b/gcc/lto/lto-lang.c +@@ -1240,10 +1240,12 @@ lto_build_c_type_nodes (void) + for (i = 0; i < NUM_INT_N_ENTS; i++) + if (int_n_enabled_p[i]) + { +- char name[50]; ++ char name[50], altname[50]; + sprintf (name, "__int%d unsigned", int_n_data[i].bitsize); ++ sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize); + +- if (strcmp (name, SIZE_TYPE) == 0) ++ if (strcmp (name, SIZE_TYPE) == 0 ++ || strcmp (altname, SIZE_TYPE) == 0) + { + intmax_type_node = int_n_trees[i].signed_type; + uintmax_type_node = int_n_trees[i].unsigned_type; +diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c +index 253772322..61700873f 100644 +--- a/gcc/stor-layout.c ++++ b/gcc/stor-layout.c +@@ -2707,10 +2707,12 @@ initialize_sizetypes (void) + for (i = 0; i < NUM_INT_N_ENTS; i++) + if (int_n_enabled_p[i]) + { +- char name[50]; ++ char name[50], altname[50]; + sprintf (name, "__int%d unsigned", int_n_data[i].bitsize); ++ sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize); + +- if (strcmp (name, SIZETYPE) == 0) ++ if (strcmp (name, SIZETYPE) == 0 ++ || strcmp (altname, SIZETYPE) == 0) + { + precision = int_n_data[i].bitsize; + } +diff --git a/gcc/tree.c b/gcc/tree.c +index cba022267..6585ab81e 100644 +--- a/gcc/tree.c ++++ b/gcc/tree.c +@@ -9821,10 +9821,12 @@ build_common_tree_nodes (bool signed_char) + for (i = 0; i < NUM_INT_N_ENTS; i++) + if (int_n_enabled_p[i]) + { +- char name[50]; ++ char name[50], altname[50]; + sprintf (name, "__int%d unsigned", int_n_data[i].bitsize); ++ sprintf 
(altname, "__int%d__ unsigned", int_n_data[i].bitsize); + +- if (strcmp (name, SIZE_TYPE) == 0) ++ if (strcmp (name, SIZE_TYPE) == 0 ++ || strcmp (altname, SIZE_TYPE) == 0) + { + size_type_node = int_n_trees[i].unsigned_type; + } +@@ -9848,9 +9850,12 @@ build_common_tree_nodes (bool signed_char) + for (int i = 0; i < NUM_INT_N_ENTS; i++) + if (int_n_enabled_p[i]) + { +- char name[50]; ++ char name[50], altname[50]; + sprintf (name, "__int%d", int_n_data[i].bitsize); +- if (strcmp (name, PTRDIFF_TYPE) == 0) ++ sprintf (altname, "__int%d__", int_n_data[i].bitsize); ++ ++ if (strcmp (name, PTRDIFF_TYPE) == 0 ++ || strcmp (altname, PTRDIFF_TYPE) == 0) + ptrdiff_type_node = int_n_trees[i].signed_type; + } + if (ptrdiff_type_node == NULL_TREE) +-- +2.43.5 + diff --git a/LoongArch-Add-tests-for-SX-and-ASX-vector-instructio.patch b/LoongArch-Add-tests-for-SX-and-ASX-vector-instructio.patch new file mode 100644 index 0000000..da36f66 --- /dev/null +++ b/LoongArch-Add-tests-for-SX-and-ASX-vector-instructio.patch @@ -0,0 +1,336371 @@ +From 62106786f2dc58963b801064b854ce54940c0564 Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 07:18:30 +0000 +Subject: [PATCH 17/30] LoongArch:Add tests for SX and ASX vector instruction. + +Signed-off-by: Peng Fan +--- + .../gcc.target/loongarch/atomic/atomic.exp | 40 + + .../loongarch/atomic/atomic_add_fetch.c | 16 + + .../atomic/atomic_compare_exchange.c | 9 + + .../atomic/atomic_compare_exchange_n.c | 59 + + .../gcc.target/loongarch/atomic/atomic_load.c | 9 + + .../loongarch/atomic/atomic_store.c | 9 + + .../gcc.target/loongarch/atomic/atomic_swap.c | 17 + + .../loongarch/atomic/atomic_thread_fence1.c | 9 + + .../loongarch/atomic/atomic_thread_fence2.c | 9 + + .../loongarch/atomic/atomic_thread_fence3.c | 15 + + gcc/testsuite/gcc.target/loongarch/cmov_ff.c | 17 + + gcc/testsuite/gcc.target/loongarch/cmov_fi.c | 17 + + gcc/testsuite/gcc.target/loongarch/cmov_if.c | 17 + + gcc/testsuite/gcc.target/loongarch/cmov_ii.c | 17 + + .../gcc.target/loongarch/fcopysign.c | 17 + + .../loongarch/insn_correctness_check.c | 159432 --------------- + .../gcc.target/loongarch/larch-builtin.c | 265 + + .../gcc.target/loongarch/loongarch.exp | 2 +- + .../gcc.target/loongarch/math-float-128.c | 81 + + .../memcpy-inline-lasx-strict-align.c | 9 + + .../gcc.target/loongarch/memcpy-inline-lasx.c | 9 + + .../loongarch/memcpy-inline-noalign.c | 9 + + .../loongarch/memcpy-inline-strict-align.c | 9 + + gcc/testsuite/gcc.target/loongarch/mulh.c | 1 + + gcc/testsuite/gcc.target/loongarch/mulw_d.c | 1 + + gcc/testsuite/gcc.target/loongarch/pr106459.c | 13 + + .../gcc.target/loongarch/pr112476-3.c | 58 + + .../gcc.target/loongarch/pr112476-4.c | 4 + + .../gcc.target/loongarch/prolog-opt.c | 14 + + .../gcc.target/loongarch/recip_sqrt.c | 11 + + .../gcc.target/loongarch/stack-realign.c | 34 + + .../loongarch/stack-usage-realign.c | 19 + + .../gcc.target/loongarch/strict-align.c | 13 + + .../gcc.target/loongarch/vec-unpack.c | 16 +- + .../gcc.target/loongarch/vec_initv32qiv16qi.c | 6 +- + .../loongarch/vect-frint-scalar-no-inexact.c | 21 + + .../gcc.target/loongarch/vect-frint-scalar.c | 39 + + .../{ => vector/lasx}/lasx-builtin.c | 4479 +- + .../loongarch/vector/lasx/lasx-sad.c | 20 + + .../loongarch/vector/lasx/lasx-xvabsd-1.c | 485 + + .../loongarch/vector/lasx/lasx-xvabsd-2.c | 650 + + .../loongarch/vector/lasx/lasx-xvadd.c | 725 + + .../loongarch/vector/lasx/lasx-xvadda.c | 785 + + .../loongarch/vector/lasx/lasx-xvaddi.c | 427 + + .../loongarch/vector/lasx/lasx-xvaddwev-1.c | 740 
+ + .../loongarch/vector/lasx/lasx-xvaddwev-2.c | 485 + + .../loongarch/vector/lasx/lasx-xvaddwev-3.c | 515 + + .../loongarch/vector/lasx/lasx-xvaddwod-1.c | 530 + + .../loongarch/vector/lasx/lasx-xvaddwod-2.c | 560 + + .../loongarch/vector/lasx/lasx-xvaddwod-3.c | 485 + + .../loongarch/vector/lasx/lasx-xvand.c | 155 + + .../loongarch/vector/lasx/lasx-xvandi.c | 196 + + .../loongarch/vector/lasx/lasx-xvandn.c | 125 + + .../loongarch/vector/lasx/lasx-xvavg-1.c | 680 + + .../loongarch/vector/lasx/lasx-xvavg-2.c | 560 + + .../loongarch/vector/lasx/lasx-xvavgr-1.c | 770 + + .../loongarch/vector/lasx/lasx-xvavgr-2.c | 650 + + .../loongarch/vector/lasx/lasx-xvbitclr.c | 635 + + .../loongarch/vector/lasx/lasx-xvbitclri.c | 515 + + .../loongarch/vector/lasx/lasx-xvbitrev.c | 650 + + .../loongarch/vector/lasx/lasx-xvbitrevi.c | 317 + + .../loongarch/vector/lasx/lasx-xvbitsel.c | 134 + + .../loongarch/vector/lasx/lasx-xvbitseli.c | 185 + + .../loongarch/vector/lasx/lasx-xvbitset.c | 620 + + .../loongarch/vector/lasx/lasx-xvbitseti.c | 405 + + .../loongarch/vector/lasx/lasx-xvbsll_v.c | 130 + + .../loongarch/vector/lasx/lasx-xvbsrl_v.c | 64 + + .../loongarch/vector/lasx/lasx-xvclo.c | 449 + + .../loongarch/vector/lasx/lasx-xvclz.c | 504 + + .../loongarch/vector/lasx/lasx-xvdiv-1.c | 485 + + .../loongarch/vector/lasx/lasx-xvdiv-2.c | 500 + + .../loongarch/vector/lasx/lasx-xvext2xv-1.c | 515 + + .../loongarch/vector/lasx/lasx-xvext2xv-2.c | 669 + + .../loongarch/vector/lasx/lasx-xvexth-1.c | 350 + + .../loongarch/vector/lasx/lasx-xvexth-2.c | 592 + + .../loongarch/vector/lasx/lasx-xvextl-1.c | 86 + + .../loongarch/vector/lasx/lasx-xvextl-2.c | 163 + + .../loongarch/vector/lasx/lasx-xvextrins.c | 515 + + .../loongarch/vector/lasx/lasx-xvfadd_d.c | 545 + + .../loongarch/vector/lasx/lasx-xvfadd_s.c | 911 + + .../loongarch/vector/lasx/lasx-xvfclass_d.c | 152 + + .../loongarch/vector/lasx/lasx-xvfclass_s.c | 95 + + .../loongarch/vector/lasx/lasx-xvfcmp_caf_s.c | 446 + + .../loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c | 977 + + .../loongarch/vector/lasx/lasx-xvfcmp_cle_s.c | 759 + + .../loongarch/vector/lasx/lasx-xvfcmp_clt_s.c | 675 + + .../loongarch/vector/lasx/lasx-xvfcmp_cne_s.c | 872 + + .../loongarch/vector/lasx/lasx-xvfcmp_cor_s.c | 340 + + .../loongarch/vector/lasx/lasx-xvfcmp_cun_s.c | 361 + + .../loongarch/vector/lasx/lasx-xvfcmp_saf_s.c | 424 + + .../loongarch/vector/lasx/lasx-xvfcmp_seq_s.c | 924 + + .../loongarch/vector/lasx/lasx-xvfcmp_sle_s.c | 627 + + .../loongarch/vector/lasx/lasx-xvfcmp_slt_s.c | 1212 + + .../loongarch/vector/lasx/lasx-xvfcmp_sne_s.c | 756 + + .../loongarch/vector/lasx/lasx-xvfcmp_sor_s.c | 438 + + .../loongarch/vector/lasx/lasx-xvfcmp_sun_s.c | 363 + + .../loongarch/vector/lasx/lasx-xvfcvt.c | 528 + + .../loongarch/vector/lasx/lasx-xvfcvth.c | 485 + + .../loongarch/vector/lasx/lasx-xvffint-1.c | 375 + + .../loongarch/vector/lasx/lasx-xvffint-2.c | 246 + + .../loongarch/vector/lasx/lasx-xvffinth.c | 262 + + .../loongarch/vector/lasx/lasx-xvflogb_d.c | 86 + + .../loongarch/vector/lasx/lasx-xvflogb_s.c | 115 + + .../loongarch/vector/lasx/lasx-xvfmadd_d.c | 382 + + .../loongarch/vector/lasx/lasx-xvfmadd_s.c | 720 + + .../loongarch/vector/lasx/lasx-xvfmax_d.c | 230 + + .../loongarch/vector/lasx/lasx-xvfmax_s.c | 560 + + .../loongarch/vector/lasx/lasx-xvfmaxa_d.c | 230 + + .../loongarch/vector/lasx/lasx-xvfmaxa_s.c | 506 + + .../loongarch/vector/lasx/lasx-xvfnmadd_d.c | 324 + + .../loongarch/vector/lasx/lasx-xvfnmadd_s.c | 895 + + .../loongarch/vector/lasx/lasx-xvfrint_d.c | 429 + 
+ .../loongarch/vector/lasx/lasx-xvfrint_s.c | 723 + + .../loongarch/vector/lasx/lasx-xvfrstp.c | 381 + + .../loongarch/vector/lasx/lasx-xvfrstpi.c | 350 + + .../loongarch/vector/lasx/lasx-xvfsqrt_d.c | 482 + + .../loongarch/vector/lasx/lasx-xvfsqrt_s.c | 457 + + .../loongarch/vector/lasx/lasx-xvftint-1.c | 471 + + .../loongarch/vector/lasx/lasx-xvftint-2.c | 1565 + + .../loongarch/vector/lasx/lasx-xvftint-3.c | 511 + + .../loongarch/vector/lasx/lasx-xvftintl.c | 1580 + + .../loongarch/vector/lasx/lasx-xvhaddw-1.c | 560 + + .../loongarch/vector/lasx/lasx-xvhaddw-2.c | 650 + + .../loongarch/vector/lasx/lasx-xvhsubw-1.c | 620 + + .../loongarch/vector/lasx/lasx-xvhsubw-2.c | 545 + + .../loongarch/vector/lasx/lasx-xvilvh.c | 530 + + .../loongarch/vector/lasx/lasx-xvilvl.c | 620 + + .../loongarch/vector/lasx/lasx-xvinsgr2vr.c | 272 + + .../loongarch/vector/lasx/lasx-xvinsve0.c | 380 + + .../loongarch/vector/lasx/lasx-xvld.c | 86 + + .../loongarch/vector/lasx/lasx-xvldi.c | 83 + + .../loongarch/vector/lasx/lasx-xvldrepl.c | 16 + + .../loongarch/vector/lasx/lasx-xvmadd.c | 742 + + .../loongarch/vector/lasx/lasx-xvmaddwev-1.c | 856 + + .../loongarch/vector/lasx/lasx-xvmaddwev-2.c | 723 + + .../loongarch/vector/lasx/lasx-xvmaddwev-3.c | 940 + + .../loongarch/vector/lasx/lasx-xvmaddwod-1.c | 742 + + .../loongarch/vector/lasx/lasx-xvmaddwod-2.c | 799 + + .../loongarch/vector/lasx/lasx-xvmaddwod-3.c | 820 + + .../loongarch/vector/lasx/lasx-xvmax-1.c | 545 + + .../loongarch/vector/lasx/lasx-xvmax-2.c | 560 + + .../loongarch/vector/lasx/lasx-xvmaxi-1.c | 471 + + .../loongarch/vector/lasx/lasx-xvmaxi-2.c | 504 + + .../loongarch/vector/lasx/lasx-xvmin-1.c | 575 + + .../loongarch/vector/lasx/lasx-xvmin-2.c | 680 + + .../loongarch/vector/lasx/lasx-xvmini-1.c | 416 + + .../loongarch/vector/lasx/lasx-xvmini-2.c | 284 + + .../loongarch/vector/lasx/lasx-xvmod-1.c | 395 + + .../loongarch/vector/lasx/lasx-xvmod-2.c | 410 + + .../loongarch/vector/lasx/lasx-xvmskgez.c | 86 + + .../loongarch/vector/lasx/lasx-xvmskltz.c | 373 + + .../loongarch/vector/lasx/lasx-xvmsknz.c | 163 + + .../loongarch/vector/lasx/lasx-xvmsub.c | 647 + + .../loongarch/vector/lasx/lasx-xvmuh-1.c | 650 + + .../loongarch/vector/lasx/lasx-xvmuh-2.c | 635 + + .../loongarch/vector/lasx/lasx-xvmul.c | 620 + + .../loongarch/vector/lasx/lasx-xvmulwev-1.c | 590 + + .../loongarch/vector/lasx/lasx-xvmulwev-2.c | 590 + + .../loongarch/vector/lasx/lasx-xvmulwev-3.c | 605 + + .../loongarch/vector/lasx/lasx-xvmulwod-1.c | 545 + + .../loongarch/vector/lasx/lasx-xvmulwod-2.c | 470 + + .../loongarch/vector/lasx/lasx-xvmulwod-3.c | 440 + + .../loongarch/vector/lasx/lasx-xvneg.c | 526 + + .../loongarch/vector/lasx/lasx-xvnor.c | 170 + + .../loongarch/vector/lasx/lasx-xvnori.c | 152 + + .../loongarch/vector/lasx/lasx-xvor.c | 215 + + .../loongarch/vector/lasx/lasx-xvori.c | 141 + + .../loongarch/vector/lasx/lasx-xvorn.c | 245 + + .../loongarch/vector/lasx/lasx-xvpackev.c | 501 + + .../loongarch/vector/lasx/lasx-xvpackod.c | 575 + + .../loongarch/vector/lasx/lasx-xvpcnt.c | 526 + + .../loongarch/vector/lasx/lasx-xvpickev.c | 515 + + .../loongarch/vector/lasx/lasx-xvpickod.c | 530 + + .../loongarch/vector/lasx/lasx-xvpickve.c | 130 + + .../loongarch/vector/lasx/lasx-xvpickve2gr.c | 388 + + .../loongarch/vector/lasx/lasx-xvprem.c | 20 + + .../loongarch/vector/lasx/lasx-xvpremi.c | 20 + + .../loongarch/vector/lasx/lasx-xvreplgr2vr.c | 380 + + .../loongarch/vector/lasx/lasx-xvreplve.c | 536 + + .../loongarch/vector/lasx/lasx-xvreplve0.c | 471 + + 
.../loongarch/vector/lasx/lasx-xvreplvei.c | 20 + + .../loongarch/vector/lasx/lasx-xvrotr.c | 530 + + .../loongarch/vector/lasx/lasx-xvrotri.c | 394 + + .../loongarch/vector/lasx/lasx-xvsadd-1.c | 650 + + .../loongarch/vector/lasx/lasx-xvsadd-2.c | 350 + + .../loongarch/vector/lasx/lasx-xvsat-1.c | 537 + + .../loongarch/vector/lasx/lasx-xvsat-2.c | 427 + + .../loongarch/vector/lasx/lasx-xvseq.c | 650 + + .../loongarch/vector/lasx/lasx-xvseqi.c | 449 + + .../loongarch/vector/lasx/lasx-xvshuf4i_b.c | 430 + + .../loongarch/vector/lasx/lasx-xvshuf_b.c | 761 + + .../loongarch/vector/lasx/lasx-xvsigncov.c | 665 + + .../loongarch/vector/lasx/lasx-xvsle-1.c | 575 + + .../loongarch/vector/lasx/lasx-xvsle-2.c | 590 + + .../loongarch/vector/lasx/lasx-xvslei-1.c | 515 + + .../loongarch/vector/lasx/lasx-xvslei-2.c | 438 + + .../loongarch/vector/lasx/lasx-xvsll.c | 425 + + .../loongarch/vector/lasx/lasx-xvslli.c | 416 + + .../loongarch/vector/lasx/lasx-xvsllwil-1.c | 339 + + .../loongarch/vector/lasx/lasx-xvsllwil-2.c | 350 + + .../loongarch/vector/lasx/lasx-xvslt-1.c | 455 + + .../loongarch/vector/lasx/lasx-xvslt-2.c | 620 + + .../loongarch/vector/lasx/lasx-xvslti-1.c | 548 + + .../loongarch/vector/lasx/lasx-xvslti-2.c | 416 + + .../loongarch/vector/lasx/lasx-xvsra.c | 545 + + .../loongarch/vector/lasx/lasx-xvsrai.c | 504 + + .../loongarch/vector/lasx/lasx-xvsran.c | 455 + + .../loongarch/vector/lasx/lasx-xvsrani.c | 545 + + .../loongarch/vector/lasx/lasx-xvsrar.c | 725 + + .../loongarch/vector/lasx/lasx-xvsrari.c | 471 + + .../loongarch/vector/lasx/lasx-xvsrarn.c | 500 + + .../loongarch/vector/lasx/lasx-xvsrarni.c | 636 + + .../loongarch/vector/lasx/lasx-xvsrl.c | 650 + + .../loongarch/vector/lasx/lasx-xvsrli.c | 405 + + .../loongarch/vector/lasx/lasx-xvsrln.c | 425 + + .../loongarch/vector/lasx/lasx-xvsrlni.c | 680 + + .../loongarch/vector/lasx/lasx-xvsrlr.c | 515 + + .../loongarch/vector/lasx/lasx-xvsrlri.c | 416 + + .../loongarch/vector/lasx/lasx-xvsrlrn.c | 410 + + .../loongarch/vector/lasx/lasx-xvsrlrni.c | 455 + + .../loongarch/vector/lasx/lasx-xvssran.c | 905 + + .../loongarch/vector/lasx/lasx-xvssrani.c | 1235 + + .../loongarch/vector/lasx/lasx-xvssrarn.c | 905 + + .../loongarch/vector/lasx/lasx-xvssrarni.c | 1160 + + .../loongarch/vector/lasx/lasx-xvssrln.c | 965 + + .../loongarch/vector/lasx/lasx-xvssrlni.c | 1130 + + .../loongarch/vector/lasx/lasx-xvssrlrn.c | 815 + + .../loongarch/vector/lasx/lasx-xvssrlrni.c | 1160 + + .../loongarch/vector/lasx/lasx-xvssub-1.c | 425 + + .../loongarch/vector/lasx/lasx-xvssub-2.c | 695 + + .../loongarch/vector/lasx/lasx-xvst.c | 102 + + .../loongarch/vector/lasx/lasx-xvstelm.c | 14 + + .../loongarch/vector/lasx/lasx-xvsub.c | 590 + + .../loongarch/vector/lasx/lasx-xvsubi.c | 482 + + .../loongarch/vector/lasx/lasx-xvsubwev-1.c | 530 + + .../loongarch/vector/lasx/lasx-xvsubwev-2.c | 440 + + .../loongarch/vector/lasx/lasx-xvsubwod-1.c | 695 + + .../loongarch/vector/lasx/lasx-xvsubwod-2.c | 620 + + .../loongarch/vector/lasx/lasx-xvxor.c | 185 + + .../loongarch/vector/lasx/lasx-xvxori.c | 163 + + .../loongarch/vector/loongarch-vector.exp | 42 + + .../loongarch/{ => vector/lsx}/lsx-builtin.c | 4341 +- + .../gcc.target/loongarch/vector/lsx/lsx-sad.c | 20 + + .../loongarch/vector/lsx/lsx-vabsd-1.c | 272 + + .../loongarch/vector/lsx/lsx-vabsd-2.c | 398 + + .../loongarch/vector/lsx/lsx-vadd.c | 416 + + .../loongarch/vector/lsx/lsx-vadda.c | 344 + + .../loongarch/vector/lsx/lsx-vaddi.c | 251 + + .../loongarch/vector/lsx/lsx-vaddwev-1.c | 335 + + 
.../loongarch/vector/lsx/lsx-vaddwev-2.c | 344 + + .../loongarch/vector/lsx/lsx-vaddwev-3.c | 425 + + .../loongarch/vector/lsx/lsx-vaddwod-1.c | 408 + + .../loongarch/vector/lsx/lsx-vaddwod-2.c | 344 + + .../loongarch/vector/lsx/lsx-vaddwod-3.c | 237 + + .../loongarch/vector/lsx/lsx-vand.c | 159 + + .../loongarch/vector/lsx/lsx-vandi.c | 67 + + .../loongarch/vector/lsx/lsx-vandn.c | 129 + + .../loongarch/vector/lsx/lsx-vavg-1.c | 398 + + .../loongarch/vector/lsx/lsx-vavg-2.c | 308 + + .../loongarch/vector/lsx/lsx-vavgr-1.c | 299 + + .../loongarch/vector/lsx/lsx-vavgr-2.c | 317 + + .../loongarch/vector/lsx/lsx-vbitclr.c | 461 + + .../loongarch/vector/lsx/lsx-vbitclri.c | 279 + + .../loongarch/vector/lsx/lsx-vbitrev.c | 407 + + .../loongarch/vector/lsx/lsx-vbitrevi.c | 336 + + .../loongarch/vector/lsx/lsx-vbitsel.c | 109 + + .../loongarch/vector/lsx/lsx-vbitseli.c | 84 + + .../loongarch/vector/lsx/lsx-vbitset.c | 371 + + .../loongarch/vector/lsx/lsx-vbitseti.c | 279 + + .../loongarch/vector/lsx/lsx-vbsll.c | 83 + + .../loongarch/vector/lsx/lsx-vbsrl.c | 55 + + .../loongarch/vector/lsx/lsx-vclo.c | 266 + + .../loongarch/vector/lsx/lsx-vclz.c | 265 + + .../loongarch/vector/lsx/lsx-vdiv-1.c | 299 + + .../loongarch/vector/lsx/lsx-vdiv-2.c | 254 + + .../loongarch/vector/lsx/lsx-vexth-1.c | 342 + + .../loongarch/vector/lsx/lsx-vexth-2.c | 182 + + .../loongarch/vector/lsx/lsx-vextl-1.c | 83 + + .../loongarch/vector/lsx/lsx-vextl-2.c | 83 + + .../loongarch/vector/lsx/lsx-vextrins.c | 479 + + .../loongarch/vector/lsx/lsx-vfadd_d.c | 407 + + .../loongarch/vector/lsx/lsx-vfadd_s.c | 470 + + .../loongarch/vector/lsx/lsx-vfclass_d.c | 83 + + .../loongarch/vector/lsx/lsx-vfclass_s.c | 74 + + .../loongarch/vector/lsx/lsx-vfcmp_caf.c | 244 + + .../loongarch/vector/lsx/lsx-vfcmp_ceq.c | 516 + + .../loongarch/vector/lsx/lsx-vfcmp_cle.c | 530 + + .../loongarch/vector/lsx/lsx-vfcmp_clt.c | 476 + + .../loongarch/vector/lsx/lsx-vfcmp_cne.c | 378 + + .../loongarch/vector/lsx/lsx-vfcmp_cor.c | 170 + + .../loongarch/vector/lsx/lsx-vfcmp_cun.c | 253 + + .../loongarch/vector/lsx/lsx-vfcmp_saf.c | 214 + + .../loongarch/vector/lsx/lsx-vfcmp_seq.c | 450 + + .../loongarch/vector/lsx/lsx-vfcmp_sle.c | 407 + + .../loongarch/vector/lsx/lsx-vfcmp_slt.c | 512 + + .../loongarch/vector/lsx/lsx-vfcmp_sne.c | 398 + + .../loongarch/vector/lsx/lsx-vfcmp_sor.c | 269 + + .../loongarch/vector/lsx/lsx-vfcmp_sun.c | 335 + + .../loongarch/vector/lsx/lsx-vfcvt-1.c | 398 + + .../loongarch/vector/lsx/lsx-vfcvt-2.c | 278 + + .../loongarch/vector/lsx/lsx-vffint-1.c | 161 + + .../loongarch/vector/lsx/lsx-vffint-2.c | 264 + + .../loongarch/vector/lsx/lsx-vffint-3.c | 102 + + .../loongarch/vector/lsx/lsx-vflogb_d.c | 76 + + .../loongarch/vector/lsx/lsx-vflogb_s.c | 185 + + .../loongarch/vector/lsx/lsx-vfmadd_d.c | 251 + + .../loongarch/vector/lsx/lsx-vfmadd_s.c | 381 + + .../loongarch/vector/lsx/lsx-vfmax_d.c | 200 + + .../loongarch/vector/lsx/lsx-vfmax_s.c | 335 + + .../loongarch/vector/lsx/lsx-vfmaxa_d.c | 155 + + .../loongarch/vector/lsx/lsx-vfmaxa_s.c | 230 + + .../loongarch/vector/lsx/lsx-vfnmadd_d.c | 196 + + .../loongarch/vector/lsx/lsx-vfnmadd_s.c | 381 + + .../loongarch/vector/lsx/lsx-vfrint_d.c | 230 + + .../loongarch/vector/lsx/lsx-vfrint_s.c | 350 + + .../loongarch/vector/lsx/lsx-vfrstp.c | 218 + + .../loongarch/vector/lsx/lsx-vfrstpi.c | 209 + + .../loongarch/vector/lsx/lsx-vfsqrt_d.c | 216 + + .../loongarch/vector/lsx/lsx-vfsqrt_s.c | 372 + + .../loongarch/vector/lsx/lsx-vftint-1.c | 349 + + .../loongarch/vector/lsx/lsx-vftint-2.c | 
695 + + .../loongarch/vector/lsx/lsx-vftint-3.c | 1028 + + .../loongarch/vector/lsx/lsx-vftint-4.c | 345 + + .../loongarch/vector/lsx/lsx-vhaddw-1.c | 488 + + .../loongarch/vector/lsx/lsx-vhaddw-2.c | 452 + + .../loongarch/vector/lsx/lsx-vhsubw-1.c | 327 + + .../loongarch/vector/lsx/lsx-vhsubw-2.c | 353 + + .../loongarch/vector/lsx/lsx-vilvh.c | 353 + + .../loongarch/vector/lsx/lsx-vilvl.c | 327 + + .../loongarch/vector/lsx/lsx-vinsgr2vr.c | 278 + + .../gcc.target/loongarch/vector/lsx/lsx-vld.c | 62 + + .../loongarch/vector/lsx/lsx-vldi.c | 61 + + .../loongarch/vector/lsx/lsx-vmadd.c | 450 + + .../loongarch/vector/lsx/lsx-vmaddwev-1.c | 472 + + .../loongarch/vector/lsx/lsx-vmaddwev-2.c | 383 + + .../loongarch/vector/lsx/lsx-vmaddwev-3.c | 383 + + .../loongarch/vector/lsx/lsx-vmaddwod-1.c | 372 + + .../loongarch/vector/lsx/lsx-vmaddwod-2.c | 438 + + .../loongarch/vector/lsx/lsx-vmaddwod-3.c | 460 + + .../loongarch/vector/lsx/lsx-vmax-1.c | 317 + + .../loongarch/vector/lsx/lsx-vmax-2.c | 362 + + .../loongarch/vector/lsx/lsx-vmaxi-1.c | 279 + + .../loongarch/vector/lsx/lsx-vmaxi-2.c | 223 + + .../loongarch/vector/lsx/lsx-vmin-1.c | 434 + + .../loongarch/vector/lsx/lsx-vmin-2.c | 344 + + .../loongarch/vector/lsx/lsx-vmini-1.c | 314 + + .../loongarch/vector/lsx/lsx-vmini-2.c | 216 + + .../loongarch/vector/lsx/lsx-vmod-1.c | 254 + + .../loongarch/vector/lsx/lsx-vmod-2.c | 254 + + .../loongarch/vector/lsx/lsx-vmskgez.c | 119 + + .../loongarch/vector/lsx/lsx-vmskltz.c | 321 + + .../loongarch/vector/lsx/lsx-vmsknz.c | 104 + + .../loongarch/vector/lsx/lsx-vmsub.c | 461 + + .../loongarch/vector/lsx/lsx-vmuh-1.c | 353 + + .../loongarch/vector/lsx/lsx-vmuh-2.c | 372 + + .../loongarch/vector/lsx/lsx-vmul.c | 282 + + .../loongarch/vector/lsx/lsx-vmulwev-1.c | 434 + + .../loongarch/vector/lsx/lsx-vmulwev-2.c | 344 + + .../loongarch/vector/lsx/lsx-vmulwev-3.c | 245 + + .../loongarch/vector/lsx/lsx-vmulwod-1.c | 272 + + .../loongarch/vector/lsx/lsx-vmulwod-2.c | 282 + + .../loongarch/vector/lsx/lsx-vmulwod-3.c | 308 + + .../loongarch/vector/lsx/lsx-vneg.c | 321 + + .../loongarch/vector/lsx/lsx-vnor.c | 109 + + .../loongarch/vector/lsx/lsx-vnori.c | 91 + + .../gcc.target/loongarch/vector/lsx/lsx-vor.c | 169 + + .../loongarch/vector/lsx/lsx-vori.c | 123 + + .../loongarch/vector/lsx/lsx-vorn.c | 109 + + .../loongarch/vector/lsx/lsx-vpackev.c | 452 + + .../loongarch/vector/lsx/lsx-vpackod.c | 461 + + .../loongarch/vector/lsx/lsx-vpcnt.c | 350 + + .../loongarch/vector/lsx/lsx-vpickev.c | 362 + + .../loongarch/vector/lsx/lsx-vpickod.c | 336 + + .../loongarch/vector/lsx/lsx-vpickve2gr.c | 488 + + .../loongarch/vector/lsx/lsx-vpremi.c | 20 + + .../loongarch/vector/lsx/lsx-vreplgr2vr.c | 212 + + .../loongarch/vector/lsx/lsx-vreplve.c | 300 + + .../loongarch/vector/lsx/lsx-vreplvei.c | 293 + + .../loongarch/vector/lsx/lsx-vrotr.c | 381 + + .../loongarch/vector/lsx/lsx-vrotri.c | 294 + + .../loongarch/vector/lsx/lsx-vsadd-1.c | 335 + + .../loongarch/vector/lsx/lsx-vsadd-2.c | 345 + + .../loongarch/vector/lsx/lsx-vsat-1.c | 231 + + .../loongarch/vector/lsx/lsx-vsat-2.c | 272 + + .../loongarch/vector/lsx/lsx-vseq.c | 470 + + .../loongarch/vector/lsx/lsx-vseqi.c | 328 + + .../loongarch/vector/lsx/lsx-vshuf.c | 394 + + .../loongarch/vector/lsx/lsx-vshuf4i.c | 348 + + .../loongarch/vector/lsx/lsx-vsigncov.c | 425 + + .../loongarch/vector/lsx/lsx-vsle-1.c | 290 + + .../loongarch/vector/lsx/lsx-vsle-2.c | 444 + + .../loongarch/vector/lsx/lsx-vslei-1.c | 258 + + .../loongarch/vector/lsx/lsx-vslei-2.c | 293 + + 
.../loongarch/vector/lsx/lsx-vsll.c | 254 + + .../loongarch/vector/lsx/lsx-vslli.c | 293 + + .../loongarch/vector/lsx/lsx-vsllwil-1.c | 244 + + .../loongarch/vector/lsx/lsx-vsllwil-2.c | 189 + + .../loongarch/vector/lsx/lsx-vslt-1.c | 434 + + .../loongarch/vector/lsx/lsx-vslt-2.c | 236 + + .../loongarch/vector/lsx/lsx-vslti-1.c | 328 + + .../loongarch/vector/lsx/lsx-vslti-2.c | 293 + + .../loongarch/vector/lsx/lsx-vsra.c | 344 + + .../loongarch/vector/lsx/lsx-vsrai.c | 258 + + .../loongarch/vector/lsx/lsx-vsran.c | 290 + + .../loongarch/vector/lsx/lsx-vsrani.c | 246 + + .../loongarch/vector/lsx/lsx-vsrar.c | 354 + + .../loongarch/vector/lsx/lsx-vsrari.c | 265 + + .../loongarch/vector/lsx/lsx-vsrarn.c | 236 + + .../loongarch/vector/lsx/lsx-vsrarni.c | 398 + + .../loongarch/vector/lsx/lsx-vsrl.c | 389 + + .../loongarch/vector/lsx/lsx-vsrli.c | 328 + + .../loongarch/vector/lsx/lsx-vsrln.c | 335 + + .../loongarch/vector/lsx/lsx-vsrlni.c | 281 + + .../loongarch/vector/lsx/lsx-vsrlr.c | 434 + + .../loongarch/vector/lsx/lsx-vsrlri.c | 300 + + .../loongarch/vector/lsx/lsx-vsrlrn.c | 164 + + .../loongarch/vector/lsx/lsx-vsrlrni.c | 686 + + .../loongarch/vector/lsx/lsx-vssran.c | 390 + + .../loongarch/vector/lsx/lsx-vssrani.c | 679 + + .../loongarch/vector/lsx/lsx-vssrarn.c | 669 + + .../loongarch/vector/lsx/lsx-vssrarni.c | 848 + + .../loongarch/vector/lsx/lsx-vssrln.c | 543 + + .../loongarch/vector/lsx/lsx-vssrlni.c | 668 + + .../loongarch/vector/lsx/lsx-vssrlrn.c | 470 + + .../loongarch/vector/lsx/lsx-vssrlrni.c | 597 + + .../loongarch/vector/lsx/lsx-vssub-1.c | 398 + + .../loongarch/vector/lsx/lsx-vssub-2.c | 408 + + .../gcc.target/loongarch/vector/lsx/lsx-vst.c | 70 + + .../loongarch/vector/lsx/lsx-vsub.c | 381 + + .../loongarch/vector/lsx/lsx-vsubi.c | 329 + + .../loongarch/vector/lsx/lsx-vsubwev-1.c | 326 + + .../loongarch/vector/lsx/lsx-vsubwev-2.c | 417 + + .../loongarch/vector/lsx/lsx-vsubwod-1.c | 326 + + .../loongarch/vector/lsx/lsx-vsubwod-2.c | 308 + + .../loongarch/vector/lsx/lsx-vxor.c | 79 + + .../loongarch/vector/lsx/lsx-vxori.c | 67 + + .../loongarch/vector/simd_correctness_check.h | 54 + + 437 files changed, 171880 insertions(+), 160945 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic.exp + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_add_fetch.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_compare_exchange.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_compare_exchange_n.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_load.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_store.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_swap.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/cmov_ff.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/cmov_fi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/cmov_if.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/cmov_ii.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/fcopysign.c + delete mode 100644 gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/larch-builtin.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/math-float-128.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-inline-lasx-strict-align.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-inline-lasx.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-inline-noalign.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-inline-strict-align.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr106459.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr112476-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr112476-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/prolog-opt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/recip_sqrt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-realign.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-usage-realign.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/strict-align.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c + rename gcc/testsuite/gcc.target/loongarch/{ => vector/lasx}/lasx-builtin.c (51%) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-sad.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c + create mode 
100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c + 
create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp + rename gcc/testsuite/gcc.target/loongarch/{ => vector/lsx}/lsx-builtin.c (51%) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-sad.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c + 
create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c + create mode 100644 
gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h + +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic.exp b/gcc/testsuite/gcc.target/loongarch/atomic/atomic.exp +new file mode 100644 +index 000000000..bebc00047 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic.exp +@@ -0,0 +1,40 @@ ++# Copyright (C) 2020-2022 Free Software Foundation, Inc. ++ ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3 of the License, or ++# (at your option) any later version. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# <http://www.gnu.org/licenses/>. ++ ++# GCC testsuite that uses the `dg.exp' driver. ++ ++# Exit immediately if this isn't a Loongarch target. ++if ![istarget loongarch*-*-*] then { ++ return ++} ++ ++# Load support procs.
++load_lib gcc-dg.exp ++ ++# If a testcase doesn't have special options, use these. ++global DEFAULT_CFLAGS ++if ![info exists DEFAULT_CFLAGS] then { ++ set DEFAULT_CFLAGS " " ++} ++ ++# Initialize `dg'. ++dg-init ++ ++# Main loop. ++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] \ ++ "" $DEFAULT_CFLAGS ++# All done. ++dg-finish +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_add_fetch.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_add_fetch.c +new file mode 100644 +index 000000000..8c2b2470b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_add_fetch.c +@@ -0,0 +1,16 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=la664" } */ ++/* { dg-final { scan-assembler "amadd\\.b" } } */ ++/* { dg-final { scan-assembler "amadd\\.h" } } */ ++ ++void ++atomic_add_fetch_b (char *src) ++{ ++ __atomic_add_fetch (src, 1, __ATOMIC_RELEASE); ++} ++ ++void ++atomic_add_fetch_h (short *src) ++{ ++ __atomic_add_fetch (src, 1, __ATOMIC_RELEASE); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_compare_exchange.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_compare_exchange.c +new file mode 100644 +index 000000000..8a071732f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_compare_exchange.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=loongarch64" } */ ++/* { dg-final { scan-assembler "dbar\t0x11" } } */ ++ ++void ++atomic_compare_exchange_w (int *ptr, int *expected, int *desired) ++{ ++ __atomic_compare_exchange (ptr, expected, desired, 0, __ATOMIC_RELEASE, __ATOMIC_ACQUIRE); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_compare_exchange_n.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_compare_exchange_n.c +new file mode 100644 +index 000000000..280c99aa1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_compare_exchange_n.c +@@ -0,0 +1,59 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=la664 -mabi=lp64d" } */ ++/* { dg-final { scan-assembler "atomic_compare_exchange_n_b:.*amcas\\.b.*atomic_compare_exchange_n_b" } } */ ++/* { dg-final { scan-assembler "atomic_compare_exchange_n_h:.*amcas\\.h.*atomic_compare_exchange_n_h" } } */ ++/* { dg-final { scan-assembler "atomic_compare_exchange_n_w:.*amcas\\.w.*atomic_compare_exchange_n_w" } } */ ++/* { dg-final { scan-assembler "atomic_compare_exchange_n_d:.*amcas\\.d.*atomic_compare_exchange_n_d" } } */ ++/* { dg-final { scan-assembler "atomic_compare_exchange_n_db_b:.*amcas_db\\.b.*atomic_compare_exchange_n_db_b" } } */ ++/* { dg-final { scan-assembler "atomic_compare_exchange_n_db_h:.*amcas_db\\.h.*atomic_compare_exchange_n_db_h" } } */ ++/* { dg-final { scan-assembler "atomic_compare_exchange_n_db_w:.*amcas_db\\.w.*atomic_compare_exchange_n_db_w" } } */ ++/* { dg-final { scan-assembler "atomic_compare_exchange_n_db_d:.*amcas_db\\.d.*atomic_compare_exchange_n_db_d" } } */ ++ ++void ++atomic_compare_exchange_n_b (char *old, char *exp) ++{ ++ __atomic_compare_exchange_n (old, exp, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); ++} ++ ++void ++atomic_compare_exchange_n_h (short *old, short *exp) ++{ ++ __atomic_compare_exchange_n (old, exp, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); ++} ++ ++void ++atomic_compare_exchange_n_w (int *old, int *exp) ++{ ++ __atomic_compare_exchange_n (old, exp, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); ++} ++ ++void ++atomic_compare_exchange_n_d (long *old, long *exp) ++{ ++ __atomic_compare_exchange_n (old, exp, 1, 0, __ATOMIC_ACQUIRE, 
__ATOMIC_ACQUIRE); ++} ++ ++void ++atomic_compare_exchange_n_db_b (char *old, char *exp) ++{ ++ __atomic_compare_exchange_n (old, exp, 1, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); ++} ++ ++void ++atomic_compare_exchange_n_db_h (short *old, short *exp) ++{ ++ __atomic_compare_exchange_n (old, exp, 1, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); ++} ++ ++void ++atomic_compare_exchange_n_db_w (int *old, int *exp) ++{ ++ __atomic_compare_exchange_n (old, exp, 1, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); ++} ++ ++void ++atomic_compare_exchange_n_db_d (long *old, long *exp) ++{ ++ __atomic_compare_exchange_n (old, exp, 1, 0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_load.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_load.c +new file mode 100644 +index 000000000..57f46996d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_load.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler "dbar\t0x14" } } */ ++ ++void ++atomic_load (int *src, int *dst) ++{ ++ __atomic_load (src, dst, __ATOMIC_ACQUIRE); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_store.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_store.c +new file mode 100644 +index 000000000..83cd6eb2c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_store.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler "dbar\t0x12" } } */ ++ ++void ++atomic_store (int *src, int *dst) ++{ ++ __atomic_store (src, dst, __ATOMIC_RELEASE); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_swap.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_swap.c +new file mode 100644 +index 000000000..462f19a1c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_swap.c +@@ -0,0 +1,17 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=la664" } */ ++/* { dg-final { scan-assembler "amswap_db\\.b" } } */ ++/* { dg-final { scan-assembler "amswap_db\\.h" } } */ ++ ++void ++atomic_exchange_b (char *dst, char *src, char *ret) ++{ ++ __atomic_exchange (dst, src, ret, __ATOMIC_ACQ_REL); ++} ++ ++void ++atomic_exchange_h (short *dst, short *src, short *ret) ++{ ++ __atomic_exchange (dst, src, ret, __ATOMIC_SEQ_CST); ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence1.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence1.c +new file mode 100644 +index 000000000..838ac7ac0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence1.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler-not "dbar" } } */ ++ ++void ++atomic_thread_fence (void) ++{ ++ __atomic_thread_fence (__ATOMIC_RELAXED); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence2.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence2.c +new file mode 100644 +index 000000000..8349ea8d4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence2.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler "dbar\t0x14" } } */ ++ ++void ++atomic_thread_fence (void) ++{ ++ __atomic_thread_fence (__ATOMIC_ACQUIRE); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence3.c b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence3.c +new file mode 100644 +index 000000000..c2472637f +--- /dev/null 
++++ b/gcc/testsuite/gcc.target/loongarch/atomic/atomic_thread_fence3.c +@@ -0,0 +1,15 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler-times "dbar\t0x10" 2} } */ ++ ++void ++atomic_thread_fence (void) ++{ ++ __atomic_thread_fence (__ATOMIC_ACQ_REL); ++} ++ ++void ++atomic_thread_fence_rel (void) ++{ ++ __atomic_thread_fence (__ATOMIC_RELEASE); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/cmov_ff.c b/gcc/testsuite/gcc.target/loongarch/cmov_ff.c +new file mode 100644 +index 000000000..f8a910afd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/cmov_ff.c +@@ -0,0 +1,17 @@ ++/* Test asm const. */ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -S" } */ ++/* { dg-final { scan-assembler-times "main:.*fcmp.*fsel.*" 1 } } */ ++#include <stdio.h> ++ ++extern void foo_ff(float*, float*, float*, float*); ++ ++int main(void) ++{ ++ float a,b; ++ float c,d,out; ++ foo_ff(&a, &b, &c, &d); ++ out = a>b?c:d; ++ printf("%f\n", out); ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/cmov_fi.c b/gcc/testsuite/gcc.target/loongarch/cmov_fi.c +new file mode 100644 +index 000000000..634c40367 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/cmov_fi.c +@@ -0,0 +1,17 @@ ++/* Test asm const. */ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -S" } */ ++/* { dg-final { scan-assembler-times "main:.*movgr2fr.*movgr2fr.*fsel.*movfr2gr.*" 1 } } */ ++#include <stdio.h> ++ ++extern void foo_fi(float*, float*, int*, int*); ++ ++int main(void) ++{ ++ float a,b; ++ int c,d,out; ++ foo_fi(&a, &b, &c, &d); ++ out = a>b?c:d; ++ printf("%f\n", out); ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/cmov_if.c b/gcc/testsuite/gcc.target/loongarch/cmov_if.c +new file mode 100644 +index 000000000..110d693ed +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/cmov_if.c +@@ -0,0 +1,17 @@ ++/* Test asm const. */ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -S" } */ ++/* { dg-final { scan-assembler-times "main:.*movgr2fr.*movfr2cf.*fsel.*" 1 } } */ ++#include <stdio.h> ++ ++extern void foo_if(int*, int*, float*, float*); ++ ++int main(void) ++{ ++ int a,b; ++ float c,d,out; ++ foo_if(&a, &b, &c, &d); ++ out = a==b?c:d; ++ printf("%f\n", out); ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/cmov_ii.c b/gcc/testsuite/gcc.target/loongarch/cmov_ii.c +new file mode 100644 +index 000000000..6847ae208 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/cmov_ii.c +@@ -0,0 +1,17 @@ ++/* Test asm const.
*/ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -S" } */ ++/* { dg-final { scan-assembler-times "main:.*xor.*masknez.*maskeqz.*or.*\|main:.*xor.*maskeqz.*masknez.*or.*" 1 } } */ ++#include <stdio.h> ++ ++extern void foo_ii(int*, int*, int*, int*); ++ ++int main(void) ++{ ++ int a,b; ++ int c,d,out; ++ foo_ii(&a, &b, &c, &d); ++ out = a==b?c:d; ++ printf("%d\n", out); ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/fcopysign.c b/gcc/testsuite/gcc.target/loongarch/fcopysign.c +new file mode 100644 +index 000000000..fdb405c8e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/fcopysign.c +@@ -0,0 +1,17 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mdouble-float" } */ ++/* { dg-final { scan-assembler "fcopysign\\.s" } } */ ++/* { dg-final { scan-assembler "fcopysign\\.d" } } */ ++ ++double ++my_copysign (double a, double b) ++{ ++ return __builtin_copysign (a, b); ++} ++ ++float ++my_copysignf (float a, float b) ++{ ++ return __builtin_copysignf (a, b); ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c b/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c +deleted file mode 100644 +index fa24ed4dd..000000000 +--- a/gcc/testsuite/gcc.target/loongarch/insn_correctness_check.c ++++ /dev/null +@@ -1,159432 +0,0 @@ +-/* { dg-do run } */ +-/* { dg-options "-mlsx -mlasx -w" } */ +-/* { dg-timeout 500 } */ +- +-#include +-#include +-#include +-#include +-#include +- +-#define ASSERTEQ_64(line, ref, res) \ +-do{ \ +- int fail = 0; \ +- for(size_t i = 0; i < sizeof(res)/sizeof(res[0]); ++i){ \ +- long *temp_ref = &ref[i], *temp_res = &res[i]; \ +- if(abs(*temp_ref - *temp_res) > 0){ \ +- printf(" error: %s at line %ld , expected "#ref"[%ld]:0x%lx, got: 0x%lx\n", \ +- __FILE__, line, i, *temp_ref, *temp_res); \ +- fail = 1; \ +- } \ +- } \ +- if(fail == 1) abort(); \ +-}while(0) +- +-#define ASSERTEQ_32(line, ref, res) \ +-do{ \ +- int fail = 0; \ +- for(size_t i = 0; i < sizeof(res)/sizeof(res[0]); ++i){ \ +- int *temp_ref = &ref[i], *temp_res = &res[i]; \ +- if(abs(*temp_ref - *temp_res) > 0){ \ +- printf(" error: %s at line %ld , expected "#ref"[%ld]:0x%x, got: 0x%x\n", \ +- __FILE__, line, i, *temp_ref, *temp_res); \ +- fail = 1; \ +- } \ +- } \ +- if(fail == 1) abort(); \ +-}while(0) +- +-#define ASSERTEQ_int(line, ref, res) \ +-do{ \ +- if (ref != res){ \ +- printf(" error: %s at line %ld , expected %d, got %d\n", \ +- __FILE__, line, ref, res); \ +- } \ +-}while(0) +- +-int main() { +- __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; +- __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; +- __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; +- +- __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; +- __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; +- __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; +- +- int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; +- long int long_op0, long_op1, long_op2, lont_out, lont_result; +- long int long_int_out, long_int_result; +- unsigned int unsigned_int_out, unsigned_int_result; +- unsigned long int unsigned_long_int_out, unsigned_long_int_result; +- +- *((int*)& __m128_op0[3]) = 0x0000c77c; +- *((int*)& __m128_op0[2]) = 0x000047cd; +- *((int*)& __m128_op0[1]) = 0x0000c0f1; +- *((int*)& __m128_op0[0]) = 0x00006549; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out =
__lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_op1[3]) = 0x34ec5670cd4b5ec0; +- *((unsigned long*)& __m256i_op1[2]) = 0x4f111e4b8e0d7291; +- *((unsigned long*)& __m256i_op1[1]) = 0xeaa81f47dc3bdd09; +- *((unsigned long*)& __m256i_op1[0]) = 0x0e0d5fde5df99830; +- *((unsigned long*)& __m256i_op2[3]) = 0x80c72fcd40fb3bc0; +- *((unsigned long*)& __m256i_op2[2]) = 0x84bd087966d4ace0; +- *((unsigned long*)& __m256i_op2[1]) = 0x26aa68b274dc1322; +- *((unsigned long*)& __m256i_op2[0]) = 0xe072db2bb9d4cd40; +- *((unsigned long*)& __m256i_result[3]) = 0x044819410d87e69a; +- *((unsigned long*)& __m256i_result[2]) = 0x21d3905ae3e93be0; +- *((unsigned long*)& __m256i_result[1]) = 0x5125883a30da0f20; +- *((unsigned long*)& __m256i_result[0]) = 0x6d7b2d3ac2777aeb; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_op1[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op1[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0xb9884ab93b0b80a0; +- *((unsigned long*)& __m128i_result[0]) = 0xf11e970c68000000; +- __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1b71a083b3dec3cd; +- *((unsigned long*)& __m128i_op1[0]) = 0x373a13323b4cdbc1; +- *((unsigned long*)& __m128i_result[1]) = 0x0802010808400820; +- *((unsigned long*)& __m128i_result[0]) = 0x8004080408100802; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000c77c000047cd; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000c0f100006549; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa486083e6536d81d; +- *((unsigned long*)& __m128i_op0[0]) = 0x58bc43853ea123ed; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000a486083e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000058bc4385; +- __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; +- *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; +- *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; +- *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x34ec5670cd4b5ec0; +- *((unsigned long*)& __m256i_op0[2]) = 0x4f111e4b8e0d7291; +- *((unsigned long*)& __m256i_op0[1]) = 0xeaa81f47dc3bdd09; +- *((unsigned long*)& __m256i_op0[0]) = 0x0e0d5fde5df99830; +- *((unsigned long*)& __m256i_op1[3]) = 0x67390c19e4b17547; +- *((unsigned long*)& __m256i_op1[2]) = 0xbacda0f96d2cec01; +- *((unsigned long*)& __m256i_op1[1]) = 0xee20ad1adae2cc16; +- *((unsigned long*)& __m256i_op1[0]) = 0x5a2003c6a406fe53; +- *((unsigned long*)& __m256i_op2[3]) = 0x80c72fcd40fb3bc0; +- *((unsigned long*)& __m256i_op2[2]) = 0x84bd087966d4ace0; +- *((unsigned long*)& __m256i_op2[1]) = 0x26aa68b274dc1322; +- *((unsigned long*)& __m256i_op2[0]) = 0xe072db2bb9d4cd40; +- *((unsigned long*)& __m256i_result[3]) = 0x372e9d75e8aab100; +- *((unsigned long*)& __m256i_result[2]) = 0x5464fbfc416b9f71; +- *((unsigned long*)& __m256i_result[1]) = 0x31730b5beb7c99f5; +- *((unsigned long*)& __m256i_result[0]) = 0x0d8264202b8ea3f0; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa486c90f6537b8d7; +- *((unsigned long*)& __m128i_op0[0]) = 0x58bcc2013ea1cc1e; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffa486c90f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000058bcc201; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xf3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; +- *((unsigned long*)& __m256i_op0[2]) = 0x5464fbfc416b9f71; +- *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; +- *((unsigned long*)& __m256i_op0[0]) = 0x0d8264202b8ea3f0; +- *((unsigned long*)& __m256i_op1[3]) = 0x80c72fcd40fb3bc0; +- *((unsigned long*)& __m256i_op1[2]) = 0x84bd087966d4ace0; +- *((unsigned long*)& __m256i_op1[1]) = 0x26aa68b274dc1322; +- *((unsigned long*)& __m256i_op1[0]) = 0xe072db2bb9d4cd40; +- *((unsigned long*)& __m256i_result[3]) = 0xffffcd42ffffecc0; +- *((unsigned long*)& __m256i_result[2]) = 0x00000475ffff4c51; +- *((unsigned long*)& __m256i_result[1]) = 0x0000740dffffad17; +- *((unsigned long*)& __m256i_result[0]) = 0x00003f4bffff7130; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128d_op0[1]) = 0xffffffffa486c90f; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000058bcc201; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffa486c90f; +- *((unsigned long*)& __m128d_result[0]) = 0x1f52d710bf295626; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x81f7f2599f0509c2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x51136d3c78388916; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffc0fcffffcf83; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000288a00003c1c; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x053531f7c6334908; +- *((unsigned long*)& __m256d_op0[2]) = 0x8e41dcbff87e7900; +- *((unsigned long*)& __m256d_op0[1]) = 0x12eb8332e3e15093; +- *((unsigned long*)& __m256d_op0[0]) = 0x9a7491f9e016ccd4; +- *((unsigned long*)& __m256d_op1[3]) = 0x345947dcd192b5c4; +- *((unsigned long*)& __m256d_op1[2]) = 0x182100c72280e687; +- *((unsigned long*)& __m256d_op1[1]) = 0x4a1c80bb8e892e00; +- *((unsigned long*)& __m256d_op1[0]) = 0x063ecfbd58abc4b7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x34598d0fd19314cb; +- *((unsigned long*)& __m256i_op0[2]) = 0x1820939b2280fa86; +- *((unsigned long*)& __m256i_op0[1]) = 0x4a1c269b8e892a3a; +- *((unsigned long*)& __m256i_op0[0]) = 0x063f2bb758abc664; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffc0fcffffcf83; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000288a00003c1c; +- *((unsigned long*)& __m256i_result[3]) = 0x3459730f2f6d1435; +- *((unsigned long*)& __m256i_result[2]) = 0x19212d61237f2b03; +- *((unsigned long*)& __m256i_result[1]) = 0x4a1c266572772a3a; +- *((unsigned long*)& __m256i_result[0]) = 0x063f032d58557648; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3133c6409eecf8b0; +- *((unsigned long*)& __m256i_op0[2]) = 0xddf50db3c617a115; +- *((unsigned long*)& __m256i_op0[1]) = 0xa432ea5a0913dc8e; +- *((unsigned long*)& __m256i_op0[0]) = 0x29d403af367b4545; +- *((unsigned long*)& __m256i_op1[3]) = 0x38a966b31be83ee9; +- *((unsigned long*)& __m256i_op1[2]) = 0x5f6108dc25b8e028; +- *((unsigned long*)& __m256i_op1[1]) = 0xf41a56e8a20878d7; +- *((unsigned long*)& __m256i_op1[0]) = 0x683b8b67e20c8ee5; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x81f7f2599f0509c2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x51136d3c78388916; +- *((unsigned long*)& __m256i_op1[3]) = 0x044819410d87e69a; +- *((unsigned long*)& __m256i_op1[2]) = 0x21d3905ae3e93be0; +- *((unsigned long*)& __m256i_op1[1]) = 0x5125883a30da0f20; +- *((unsigned long*)& __m256i_op1[0]) = 0x6d7b2d3ac2777aeb; +- *((unsigned long*)& __m256i_result[3]) = 0x000019410000e69a; +- *((unsigned long*)& __m256i_result[2]) = 0xf259905a09c23be0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000883a00000f20; +- *((unsigned long*)& __m256i_result[0]) = 0x6d3c2d3a89167aeb; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xa486c90f; +- *((int*)& __m128_op0[2]) = 0x157ca12e; +- *((int*)& __m128_op0[1]) = 0x58bcc201; +- *((int*)& __m128_op0[0]) = 0x2e635d65; +- *((int*)& __m128_op1[3]) = 0x6d564875; +- *((int*)& __m128_op1[2]) = 0xf8760005; +- *((int*)& __m128_op1[1]) = 0x8dc5a4d1; +- *((int*)& __m128_op1[0]) = 0x79ffa22f; +- *((int*)& __m128_op2[3]) = 0xffffffff; +- *((int*)& __m128_op2[2]) = 0xd2436487; +- *((int*)& __m128_op2[1]) = 0x0fa96b88; +- *((int*)& __m128_op2[0]) = 0x5f94ab13; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xd24271c4; +- *((int*)& __m128_result[1]) = 0x2711bad1; +- *((int*)& __m128_result[0]) = 0xe8e309ed; +- __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; +- *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; +- *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; +- *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; +- *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; +- *((unsigned long*)& __m256i_op1[2]) = 0xf259905a09c23be0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; +- *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3a89167aeb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000090100008492; +- *((unsigned long*)& __m256i_result[2]) = 0xf000104808420300; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000800000e20; +- *((unsigned long*)& __m256i_result[0]) = 0x04082d108006284b; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x04481940fbb7e6bf; +- *((unsigned long*)& __m256i_op0[2]) = 0xf2781966e6991966; +- *((unsigned long*)& __m256i_op0[1]) = 0x51258839aeda77c6; +- *((unsigned long*)& __m256i_op0[0]) = 0xcf25f0e00f1ff0e0; +- *((unsigned long*)& __m256i_result[3]) = 0x0501030100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001030100000301; +- *((unsigned long*)& __m256i_result[1]) = 0x0102000200000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000004030000; +- __m256i_out = 
__lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffd24271c4; +- *((unsigned long*)& __m128i_op1[0]) = 0x2711bad1e8e309ed; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_result[0]) = 0x0020002000200020; +- __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x38a966b31be83ee9; +- *((unsigned long*)& __m256i_op0[2]) = 0x5f6108dc25b8e028; +- *((unsigned long*)& __m256i_op0[1]) = 0xf41a56e8a20878d7; +- *((unsigned long*)& __m256i_op0[0]) = 0x683b8b67e20c8ee5; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffcd42ffffecc0; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000475ffff4c51; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000740dffffad17; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003f4bffff7130; +- *((unsigned long*)& __m256i_result[3]) = 0x38a966b31be83ee9; +- *((unsigned long*)& __m256i_result[2]) = 0x5f6108dc25b80001; +- *((unsigned long*)& __m256i_result[1]) = 0xf41a56e8a20878d7; +- *((unsigned long*)& __m256i_result[0]) = 0x683b8b67e20c0001; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000001b3c4c0a5c; +- *((unsigned long*)& __m256i_result[3]) = 0x3c4c0a5c3c4c0a5c; +- *((unsigned long*)& __m256i_result[2]) = 0x3c4c0a5c3c4c0a5c; +- *((unsigned long*)& __m256i_result[1]) = 0x3c4c0a5c3c4c0a5c; +- *((unsigned long*)& __m256i_result[0]) = 0x3c4c0a5c3c4c0a5c; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffa486c90f; +- *((unsigned long*)& __m128i_op2[0]) = 0x1f52d710bf295626; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0501030102141923; +- *((unsigned long*)& __m256i_op0[2]) = 0xffd5020738b43ddb; +- *((unsigned long*)& __m256i_op0[1]) = 0x010200023b8e4174; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff4ff4e11410b40; 
+- *((unsigned long*)& __m256i_op1[3]) = 0x01fa022a01a401e5; +- *((unsigned long*)& __m256i_op1[2]) = 0x030d03aa0079029b; +- *((unsigned long*)& __m256i_op1[1]) = 0x024c01f901950261; +- *((unsigned long*)& __m256i_op1[0]) = 0x008102c2008a029f; +- *((unsigned long*)& __m256i_result[3]) = 0x0101070102041903; +- *((unsigned long*)& __m256i_result[2]) = 0xdfd506073ab435db; +- *((unsigned long*)& __m256i_result[1]) = 0x110202023bae4176; +- *((unsigned long*)& __m256i_result[0]) = 0xfff6ff4a15418b40; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0501030102141923; +- *((unsigned long*)& __m256i_op0[2]) = 0xffd5020738b43ddb; +- *((unsigned long*)& __m256i_op0[1]) = 0x010200023b8e4174; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff4ff4e11410b40; +- *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; +- *((unsigned long*)& __m256i_op1[2]) = 0xf259905a09c23be0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; +- *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3a89167aeb; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000501e99b; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000109973de7; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001020f22; +- *((unsigned long*)& __m256i_result[0]) = 0x00000001890b7a39; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; +- *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; +- *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; +- *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000501e99b; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000109973de7; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001020f22; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000001890b7a39; +- *((unsigned long*)& __m256i_result[3]) = 0x1b974ebaf6d64d4e; +- *((unsigned long*)& __m256i_result[2]) = 0x62e0429c1b48fed1; +- *((unsigned long*)& __m256i_result[1]) = 0x18b985adf63f548c; +- *((unsigned long*)& __m256i_result[0]) = 0x032c796ecbdecc3b; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x8a228acac14e440a; +- *((unsigned long*)& __m128d_op1[0]) = 0xc77c47cdc0f16549; +- *((unsigned long*)& __m128d_op2[1]) = 0xffffffffd24271c4; +- *((unsigned long*)& __m128d_op2[0]) = 0x2711bad1e8e309ed; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffd24271c4; +- *((unsigned long*)& __m128d_result[0]) = 0x2711bad1e8e309ed; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x38a966b31be83ee9; +- *((unsigned long*)& __m256i_op0[2]) = 0x5f6108dc25b80001; +- *((unsigned long*)& __m256i_op0[1]) = 0xf41a56e8a20878d7; +- *((unsigned long*)& __m256i_op0[0]) = 
0x683b8b67e20c0001; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000501e99b; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000109973de7; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001020f22; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000001890b7a39; +- *((unsigned long*)& __m256i_result[3]) = 0x38a966b301f41ffd; +- *((unsigned long*)& __m256i_result[2]) = 0x5f6108ee13ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xf41a56e8d10201f6; +- *((unsigned long*)& __m256i_result[0]) = 0x683b8b34f1020001; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x38a966b301f41ffd; +- *((unsigned long*)& __m256d_op0[2]) = 0x5f6108ee13ff0000; +- *((unsigned long*)& __m256d_op0[1]) = 0xf41a56e8d10201f6; +- *((unsigned long*)& __m256d_op0[0]) = 0x683b8b34f1020001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x35); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; +- *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; +- *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; +- *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; +- *((unsigned long*)& __m256i_op1[3]) = 0x000019410000e69a; +- *((unsigned long*)& __m256i_op1[2]) = 0xf259905a0c126604; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000883a00000f20; +- *((unsigned long*)& __m256i_op1[0]) = 0x6d3c2d3aa1c82947; +- *((unsigned long*)& __m256i_result[3]) = 0x0000f647000007d6; +- *((unsigned long*)& __m256i_result[2]) = 0x031b358c021ee663; +- *((unsigned long*)& __m256i_result[1]) = 0x0000faaf0000f9f8; +- *((unsigned long*)& __m256i_result[0]) = 0x02b4fdadfa9704df; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000015d050192cb; +- *((unsigned long*)& __m256d_op0[2]) = 0x028e509508b16ee9; +- *((unsigned long*)& __m256d_op0[1]) = 0x000033ff01020e23; +- *((unsigned long*)& __m256d_op0[0]) = 0x151196b58fd1114d; +- *((unsigned long*)& __m256d_op1[3]) = 0x372e9d75e8aab100; +- *((unsigned long*)& __m256d_op1[2]) = 0xc5c085372cfabfba; +- *((unsigned long*)& __m256d_op1[1]) = 0x31730b5beb7c99f5; +- *((unsigned long*)& __m256d_op1[0]) = 0x0658f2dc0eb21e3c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000019410000e69a; +- *((unsigned long*)& __m256i_op0[2]) = 0xf259905a0c126604; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000883a00000f20; +- *((unsigned long*)& __m256i_op0[0]) = 
0x6d3c2d3aa1c82947; +- *((unsigned long*)& __m256i_result[3]) = 0x000019410000e6aa; +- *((unsigned long*)& __m256i_result[2]) = 0xf259905a0c126614; +- *((unsigned long*)& __m256i_result[1]) = 0x0000883a00000f30; +- *((unsigned long*)& __m256i_result[0]) = 0x6d3c2d3aa1c82957; +- __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x0001ffaa; +- *((int*)& __m256_op1[6]) = 0x0000040e; +- *((int*)& __m256_op1[5]) = 0x00007168; +- *((int*)& __m256_op1[4]) = 0x00007bb6; +- *((int*)& __m256_op1[3]) = 0x0001ffe8; +- *((int*)& __m256_op1[2]) = 0x0001fe9c; +- *((int*)& __m256_op1[1]) = 0x00002282; +- *((int*)& __m256_op1[0]) = 0x00001680; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff60090958; +- *((unsigned long*)& __m128i_op1[0]) = 0x0fa96b88d9944d42; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00001802041b0013; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1828f0e09bad7249; +- *((unsigned long*)& __m256i_op0[2]) = 0x07ffc1b723953cec; +- *((unsigned long*)& __m256i_op0[1]) = 0x61f2e9b333aab104; +- *((unsigned long*)& __m256i_op0[0]) = 0x6bf742aa0d7856a0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0d41c9a7bdd239a7; +- *((unsigned long*)& __m256i_op1[2]) = 0x0b025d0ef8fdf987; +- *((unsigned long*)& __m256i_op1[1]) = 0x002944f92da5a708; +- *((unsigned long*)& __m256i_op1[0]) = 0x038cf4ea999922ef; +- *((unsigned long*)& __m256i_result[3]) = 
0x00ff0000ffff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0xff000000ffffff00; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffff00ff; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000015d050192cb; +- *((unsigned long*)& __m256i_op0[2]) = 0x028e509508b16ee9; +- *((unsigned long*)& __m256i_op0[1]) = 0x000033ff01020e23; +- *((unsigned long*)& __m256i_op0[0]) = 0x151196b58fd1114d; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff0000ffff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0xff000000ffffff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffaff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffd7200fffff74f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000702f; +- __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xc0c00000c0c00000; +- *((unsigned long*)& __m128i_op2[0]) = 0xc0c00c01c2cd0009; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0003ff540000081c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0003ffd00003fd38; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001ffaa0000040e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000716800007bb6; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001ffe80001fe9c; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000228200001680; +- *((unsigned long*)& __m256i_op2[3]) = 0x372e9d75e8aab100; +- *((unsigned long*)& __m256i_op2[2]) = 0xc5c085372cfabfba; +- *((unsigned long*)& __m256i_op2[1]) = 0x31730b5beb7c99f5; +- *((unsigned long*)& __m256i_op2[0]) = 0x0658f2dc0eb21e3c; +- *((unsigned long*)& __m256i_result[3]) = 0x002e4db200000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000315ac0000d658; +- *((unsigned long*)& __m256i_result[1]) = 0x00735278007cf94c; +- *((unsigned long*)& __m256i_result[0]) = 0x0003ed8800031b38; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000015d050192cb; +- *((unsigned long*)& __m256i_op0[2]) = 0x028e509508b16ee9; +- *((unsigned long*)& __m256i_op0[1]) = 0x000033ff01020e23; +- *((unsigned long*)& __m256i_op0[0]) = 0x151196b58fd1114d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001ffaa0000040e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000716800007bb6; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001ffe80001fe9c; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000228200001680; +- *((unsigned long*)& __m256i_result[3]) = 0x000100ab000500a0; +- *((unsigned long*)& __m256i_result[2]) = 0x000200b800080124; +- *((unsigned long*)& __m256i_result[1]) = 0x0001011b000200aa; +- *((unsigned long*)& __m256i_result[0]) = 0x00150118008f0091; +- __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000019410000e69a; +- *((unsigned long*)& __m256i_op0[2]) = 0xf259905a0c126604; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000883a00000f20; +- *((unsigned long*)& __m256i_op0[0]) = 0x6d3c2d3aa1c82947; +- *((unsigned long*)& __m256i_op1[3]) = 0x372e9d75e8aab100; +- *((unsigned long*)& __m256i_op1[2]) = 0xc5c085372cfabfba; +- *((unsigned long*)& __m256i_op1[1]) = 0x31730b5beb7c99f5; +- *((unsigned long*)& __m256i_op1[0]) = 0x0658f2dc0eb21e3c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000b6b60001979a; +- *((unsigned long*)& __m256i_result[2]) = 0x00011591000125be; +- *((unsigned long*)& __m256i_result[1]) = 0x000093950000a915; +- *((unsigned long*)& __m256i_result[0]) = 0x0001201600004783; +- __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x372e9d75e8aab100; +- *((unsigned long*)& __m256i_op0[2]) = 0xc5c085372cfabfba; +- *((unsigned long*)& __m256i_op0[1]) = 0x31730b5beb7c99f5; +- *((unsigned long*)& __m256i_op0[0]) = 0x0658f2dc0eb21e3c; +- *((int*)& __m256_result[7]) = 0x4e5cba76; +- *((int*)& __m256_result[6]) = 0xcdbaaa78; +- *((int*)& __m256_result[5]) = 0xce68fdeb; +- *((int*)& __m256_result[4]) = 0x4e33eaff; +- *((int*)& __m256_result[3]) = 0x4e45cc2d; +- *((int*)& __m256_result[2]) = 0xcda41b30; +- *((int*)& __m256_result[1]) = 0x4ccb1e5c; +- *((int*)& __m256_result[0]) = 0x4d6b21e4; +- __m256_out = 
__lasx_xvffint_s_w(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00001802041b0013; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc0c00000c0c00000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc0c00c01c2cd0009; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0e2d5626ff75cdbc; +- *((unsigned long*)& __m256i_op0[2]) = 0x5db4b156e2002a78; +- *((unsigned long*)& __m256i_op0[1]) = 0xeeffbeb03ba3e6b0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0c16e25eb28d27ea; +- *((unsigned long*)& __m256d_result[3]) = 0x41ac5aac4c000000; +- *((unsigned long*)& __m256d_result[2]) = 0xc161464880000000; +- *((unsigned long*)& __m256d_result[1]) = 0xc1b1004150000000; +- *((unsigned long*)& __m256d_result[0]) = 0x41cdd1f358000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00001802041b0013; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000007f7f02; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x1828f0e09bad7249; +- *((unsigned long*)& __m256d_op0[2]) = 0x07ffc1b723953cec; +- *((unsigned long*)& __m256d_op0[1]) = 0x61f2e9b333aab104; +- *((unsigned long*)& __m256d_op0[0]) = 0x6bf742aa0d7856a0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4e5cba76cdbaaa78; +- *((unsigned long*)& __m256i_op0[2]) = 0xce68fdeb4e33eaff; +- *((unsigned long*)& __m256i_op0[1]) = 0x4e45cc2dcda41b30; +- *((unsigned long*)& __m256i_op0[0]) = 0x4ccb1e5c4d6b21e4; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x44bb2cd3a35c2fd0; +- *((unsigned long*)& __m256i_result[0]) = 0xca355ba46a95e31c; +- __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x002e4db200000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000315ac0000d658; +- *((unsigned long*)& __m256i_op0[1]) = 0x00735278007cf94c; +- *((unsigned long*)& __m256i_op0[0]) = 0x0003ed8800031b38; +- *((unsigned long*)& __m256i_result[3]) = 0xffd1b24e00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffcea54ffff29a8; +- *((unsigned long*)& __m256i_result[1]) = 0xff8cad88ff8306b4; +- *((unsigned long*)& __m256i_result[0]) = 0xfffc1278fffce4c8; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffd1b24e00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffcea54ffff29a8; +- *((unsigned long*)& __m256i_op0[1]) = 0xff8cad88ff8306b4; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffc1278fffce4c8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0e2d5626ff75cdbc; +- *((unsigned long*)& __m256i_op1[2]) = 0x5db4b156e2002a78; +- *((unsigned long*)& __m256i_op1[1]) = 0xeeffbeb03ba3e6b0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0c16e25eb28d27ea; +- *((unsigned long*)& __m256i_result[3]) = 0xf96d674800000000; +- *((unsigned long*)& __m256i_result[2]) = 0x44a4330e2c7116c0; +- *((unsigned long*)& __m256i_result[1]) = 0x14187a7822b653c0; +- *((unsigned long*)& __m256i_result[0]) = 0xfbe0b866962b96d0; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242071db; +- *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa578; +- *((unsigned long*)& __m128i_result[1]) = 0xe0dee7779210b8ed; +- *((unsigned long*)& __m128i_result[0]) = 0xf463dbabebb5d2bc; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x44bb2cd3a35c2fd0; +- *((unsigned long*)& __m256i_op0[0]) = 0xca355ba46a95e31c; +- *((unsigned long*)& __m256i_op1[3]) = 0x000100ab000500a0; +- *((unsigned long*)& __m256i_op1[2]) = 0x000200b800080124; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001011b000200aa; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x00150118008f0091; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f057f0b7f5b007f; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff0000ffff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff000000ffffff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffffff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x01fa022a01a401e5; +- *((unsigned long*)& __m256i_op1[2]) = 0x030d03aa0079029b; +- *((unsigned long*)& __m256i_op1[1]) = 0x024c01f901950261; +- *((unsigned long*)& __m256i_op1[0]) = 0x008102c2008a029f; +- *((unsigned long*)& __m256i_op2[3]) = 0x002e4db200000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x000315ac0000d658; +- *((unsigned long*)& __m256i_op2[1]) = 0x00735278007cf94c; +- *((unsigned long*)& __m256i_op2[0]) = 0x0003ed8800031b38; +- *((unsigned long*)& __m256i_result[3]) = 0x01a72334ffff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0xff4f6838ff937648; +- *((unsigned long*)& __m256i_result[1]) = 0x00a2afb7fff00ecb; +- *((unsigned long*)& __m256i_result[0]) = 0xffce110f004658c7; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x00001802041b0014; +- __m128i_out = __lsx_vsub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffd1b24e00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffcea54ffff29a8; +- *((unsigned long*)& __m256i_op0[1]) = 0xff8cad88ff8306b4; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffc1278fffce4c8; +- *((unsigned long*)& __m256i_result[3]) = 0x0802010000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0806030008080001; +- *((unsigned long*)& __m256i_result[1]) = 0x0801010108010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0806000008060302; +- __m256i_out = __lasx_xvclo_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x44bb2cd3a35c2fd0; +- *((unsigned long*)& __m256i_op0[0]) = 0xca355ba46a95e31c; +- *((unsigned long*)& __m256i_result[3]) = 0x1d1d1d1d1d1d1d1d; +- *((unsigned long*)& __m256i_result[2]) = 0x1d1d1d1d1d1d1d1d; +- *((unsigned long*)& __m256i_result[1]) = 0x61d849f0c0794ced; +- *((unsigned long*)& __m256i_result[0]) = 0xe75278c187b20039; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf96d674800000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x44a4330e2c7116c0; +- *((unsigned long*)& __m256i_op0[1]) = 0x14187a7822b653c0; +- *((unsigned long*)& __m256i_op0[0]) = 0xfbe0b866962b96d0; +- *((unsigned long*)& __m256i_op1[3]) = 
0xffd1b24e00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffcea54ffff29a8; +- *((unsigned long*)& __m256i_op1[1]) = 0xff8cad88ff8306b4; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffc1278fffce4c8; +- *((unsigned long*)& __m256i_result[3]) = 0xebfd15f000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x01700498ff8f1600; +- *((unsigned long*)& __m256i_result[1]) = 0xf520c7c024221300; +- *((unsigned long*)& __m256i_result[0]) = 0x00802fd0ff540a80; +- __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xebfd15f000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x01700498ff8f1600; +- *((unsigned long*)& __m256i_op0[1]) = 0xf520c7c024221300; +- *((unsigned long*)& __m256i_op0[0]) = 0x00802fd0ff540a80; +- *((unsigned long*)& __m256i_op1[3]) = 0xebfd15f000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x01700498ff8f1600; +- *((unsigned long*)& __m256i_op1[1]) = 0xf520c7c024221300; +- *((unsigned long*)& __m256i_op1[0]) = 0x00802fd0ff540a80; +- *((unsigned long*)& __m256i_op2[3]) = 0xf96d674800000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x44a4330e2c7116c0; +- *((unsigned long*)& __m256i_op2[1]) = 0x14187a7822b653c0; +- *((unsigned long*)& __m256i_op2[0]) = 0xfbe0b866962b96d0; +- *((unsigned long*)& __m256i_result[3]) = 0xebfd15f000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x015c6a7facc39600; +- *((unsigned long*)& __m256i_result[1]) = 0xfa070a51cbd95300; +- *((unsigned long*)& __m256i_result[0]) = 0x00c7463075439280; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x1b976395; +- *((int*)& __m256_op0[6]) = 0x2fc4c101; +- *((int*)& __m256_op0[5]) = 0xe37affb4; +- *((int*)& __m256_op0[4]) = 0x2fc05f69; +- *((int*)& __m256_op0[3]) = 0x18b988e6; +- *((int*)& __m256_op0[2]) = 0x4facb558; +- *((int*)& __m256_op0[1]) = 0xe5fb66c8; +- *((int*)& __m256_op0[0]) = 0x1da8e5bb; +- *((int*)& __m256_op1[7]) = 0x01a72334; +- *((int*)& __m256_op1[6]) = 0xffff00ff; +- *((int*)& __m256_op1[5]) = 0xff4f6838; +- *((int*)& __m256_op1[4]) = 0xff937648; +- *((int*)& __m256_op1[3]) = 0x00a2afb7; +- *((int*)& __m256_op1[2]) = 0xfff00ecb; +- *((int*)& __m256_op1[1]) = 0xffce110f; +- *((int*)& __m256_op1[0]) = 0x004658c7; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f057f0b7f5b007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7f00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7fff7fff7fff00; +- __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); 
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf96d674800000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x44a4330e2c7116c0; +- *((unsigned long*)& __m256i_op0[1]) = 0x14187a7822b653c0; +- *((unsigned long*)& __m256i_op0[0]) = 0xfbe0b866962b96d0; +- *((unsigned long*)& __m256i_result[3]) = 0xf90c0c0c00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0ca40c0c0c0c0cc0; +- *((unsigned long*)& __m256i_result[1]) = 0x0c0c0c0c0cb60cc0; +- *((unsigned long*)& __m256i_result[0]) = 0xfbe0b80c960c96d0; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xf90c0c0c00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0ca40c0c0c0c0cc0; +- *((unsigned long*)& __m256d_op0[1]) = 0x0c0c0c0c0cb60cc0; +- *((unsigned long*)& __m256d_op0[0]) = 0xfbe0b80c960c96d0; +- *((unsigned long*)& __m256d_op1[3]) = 0x1b9763952fc4c101; +- *((unsigned long*)& __m256d_op1[2]) = 0xe37affb42fc05f69; +- *((unsigned long*)& __m256d_op1[1]) = 0x18b988e64facb558; +- *((unsigned long*)& __m256d_op1[0]) = 0xe5fb66c81da8e5bb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f057f0b7f5b007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000007f007f5; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x1b9763952fc4c101; +- *((unsigned long*)& __m256i_op1[2]) = 0xe37affb42fc05f69; +- *((unsigned long*)& __m256i_op1[1]) = 0x18b988e64facb558; +- *((unsigned long*)& __m256i_op1[0]) = 0xe5fb66c81da8e5bb; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xe37affb42fc05f69; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x65fb66c81da8e5ba; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1d1d1d1d1d1d1d1d; +- *((unsigned long*)& __m256i_op0[2]) = 0x1d1d1d1d1d1d1d1d; +- *((unsigned long*)& __m256i_op0[1]) = 0x61d849f0c0794ced; +- *((unsigned long*)& __m256i_op0[0]) = 0xe75278c187b20039; +- *((unsigned long*)& __m256i_op1[3]) = 
0xf90c0c0c00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0ca40c0c0c0c0cc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0c0c0c0c0cb60cc0; +- *((unsigned long*)& __m256i_op1[0]) = 0xfbe0b80c960c96d0; +- *((unsigned long*)& __m256i_result[3]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_result[2]) = 0x146014141414146e; +- *((unsigned long*)& __m256i_result[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_result[0]) = 0xf19998668e5f4b84; +- __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000007f007f5; +- *((unsigned long*)& __m256i_op1[3]) = 0x002e4db200000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000315ac0000d658; +- *((unsigned long*)& __m256i_op1[1]) = 0x00735278007cf94c; +- *((unsigned long*)& __m256i_op1[0]) = 0x0003ed8800031b38; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_op0[2]) = 0x146014141414146e; +- *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_op0[0]) = 0xf19998668e5f4b84; +- long_op1 = 0x0000007942652524; +- *((unsigned long*)& __m256i_result[3]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000007942652524; +- *((unsigned long*)& __m256i_result[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_result[0]) = 0xf19998668e5f4b84; +- __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x00001802041b0014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000003004; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_op1[2]) = 0x00d6c1c830160048; +- *((unsigned long*)& __m256i_op1[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_op1[0]) = 0xe3aebaf4df958004; +- *((unsigned long*)& __m256i_result[3]) = 0xc58a0a0a07070706; +- *((unsigned long*)& __m256i_result[2]) = 0x006b60e4180b0023; +- *((unsigned long*)& __m256i_result[1]) = 0x1b39153f334b966a; +- *((unsigned long*)& __m256i_result[0]) = 0xf1d75d79efcac002; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00003f803f800100; +- __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xe37affb42fc05f69; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x65fb66c81da8e5ba; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256d_op2[2]) = 0x00d6c1c830160048; +- *((unsigned long*)& __m256d_op2[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256d_op2[0]) = 0xe3aebaf4df958004; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0x00d6c1c830160048; +- *((unsigned long*)& __m256d_result[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256d_result[0]) = 0xe3aebaf4df958004; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0014; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000c01020d8009; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; +- *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x006be0e4180b0024; +- *((unsigned long*)& __m256i_result[1]) = 0x1b39153f334b166b; +- *((unsigned long*)& __m256i_result[0]) = 0xf1d7dd7aefcac002; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f800000976801fe; +- *((unsigned long*)& __m128i_op1[0]) = 0x837c1ae57f8012ed; +- *((unsigned long*)& __m128i_result[1]) = 0x976801fd6897fe02; +- *((unsigned long*)& __m128i_result[0]) = 0x7f8012ec807fed13; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f801fa06451ef11; +- *((unsigned long*)& __m128i_op0[0]) = 0x68bcf93435ed25ed; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffb64c; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000003900; +- *((unsigned long*)& __m128i_result[0]) = 0x68bcf93435ed25ed; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffc00; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffc00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003900; +- *((unsigned long*)& __m128i_op0[0]) = 0x68bcf93435ed25ed; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00003f803f800100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000c01020d8009; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000003004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000c01020d5005; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f801fa06451ef11; +- *((unsigned long*)& __m128i_op1[0]) = 0x68bcf93435ed25ed; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fa022a01a401e5; +- *((unsigned long*)& __m256i_op0[2]) = 0x030d03aa0079029b; +- *((unsigned long*)& __m256i_op0[1]) = 0x024c01f901950261; +- *((unsigned long*)& __m256i_op0[0]) = 0x008102c2008a029f; +- *((unsigned long*)& __m256i_result[3]) = 0x54000000ca000000; +- *((unsigned long*)& __m256i_result[2]) = 0x5400000036000000; +- *((unsigned long*)& __m256i_result[1]) = 0xf2000000c2000000; +- *((unsigned long*)& __m256i_result[0]) = 0x840000003e000000; +- __m256i_out = __lasx_xvslli_w(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000400000004000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000400000007004; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xc58a0a0a; +- *((int*)& __m256_op0[6]) = 0x07070706; +- *((int*)& __m256_op0[5]) = 0x006b60e4; +- *((int*)& __m256_op0[4]) = 0x180b0023; +- *((int*)& __m256_op0[3]) = 0x1b39153f; +- *((int*)& __m256_op0[2]) = 0x334b966a; +- *((int*)& __m256_op0[1]) = 0xf1d75d79; +- *((int*)& __m256_op0[0]) = 0xefcac002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00003004; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0xc3080000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc58a0a0a07070706; +- *((unsigned long*)& __m256i_op0[2]) = 0x006b60e4180b0023; +- *((unsigned long*)& __m256i_op0[1]) = 0x1b39153f334b966a; +- *((unsigned long*)& __m256i_op0[0]) = 0xf1d75d79efcac002; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000400000004000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000400000007004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x00a300a300a300a3; +- *((unsigned long*)& __m128i_result[0]) = 0x00a300a300a300a3; +- __m128i_out = __lsx_vldi(1187); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; +- *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; +- *((unsigned long*)& __m256i_result[3]) = 0xfffe000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100020001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fffffffffffe; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0x80000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00a300a3; +- *((int*)& __m128_op1[2]) = 0x00a300a3; +- *((int*)& __m128_op1[1]) = 0x00a300a3; +- *((int*)& __m128_op1[0]) = 0x00a300a3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffe000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100020001; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000fffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_op1[2]) = 0x00d6c1c830160048; +- *((unsigned long*)& __m256i_op1[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_op1[0]) = 0xe3aebaf4df958004; +- *((unsigned long*)& __m256i_result[3]) = 0xc5890a0a07070707; +- *((unsigned long*)& __m256i_result[2]) = 0x006be0e4180b8024; +- *((unsigned long*)& __m256i_result[1]) = 0x1b399540334c966c; +- *((unsigned long*)& __m256i_result[0]) = 0x71d7dd7aefcac001; +- __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xc5890a0a07070707; +- *((unsigned long*)& __m256i_op1[2]) = 0x006be0e4180b8024; +- *((unsigned long*)& __m256i_op1[1]) = 0x1b399540334c966c; +- *((unsigned long*)& __m256i_op1[0]) = 0x71d7dd7aefcac001; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_op0[2]) = 0x00d6c1c830160048; +- *((unsigned long*)& __m256i_op0[1]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_op0[0]) = 0xe3aebaf4df958004; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x36722a7e66972cd6; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001802041b0013; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001ffff00000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x2f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& 
__m128_op0[1]) = 0x00001802; +- *((int*)& __m128_op0[0]) = 0x041b0013; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vftintrp_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00001802; +- *((int*)& __m128_op0[0]) = 0x041b0013; +- *((int*)& __m128_op1[3]) = 0xff800000; +- *((int*)& __m128_op1[2]) = 0xff800000; +- *((int*)& __m128_op1[1]) = 0xff800000; +- *((int*)& __m128_op1[0]) = 0xc3080000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8b1414140e0e0e0e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x36722a7e66972cd6; +- *((unsigned long*)& __m256i_op1[3]) = 0xc58a0a0a07070706; +- *((unsigned long*)& __m256i_op1[2]) = 0x006b60e4180b0023; +- *((unsigned long*)& __m256i_op1[1]) = 0x1b39153f334b966a; +- *((unsigned long*)& __m256i_op1[0]) = 0xf1d75d79efcac002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x006b60e40e0e0e0e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x36722a7e66972cd6; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0edf8d7; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8bc70f; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe0edf8d7; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8bc70f; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffe06df8d7; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffbe8b470f; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x8b141414; +- *((int*)& __m256_op0[4]) = 0x0e0e0e0e; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x36722a7e; +- *((int*)& __m256_op0[0]) = 0x66972cd6; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 
0xff800000c3080000; +- *((unsigned long*)& __m128i_result[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_result[0]) = 0xff81ffffc3080000; +- __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffc00; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffc00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0xbf800000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0xcf000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000045eef14fe8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffc00; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffc00; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffc00; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffc00; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000020000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000020000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000020000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000020000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000003ffffffff; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xbf80000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xcf00000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1040400000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0961000100000001; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x10404000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x09610001; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff0001; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003a099512; +- *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313763f5; +- *((unsigned long*)& __m256i_op0[0]) = 0xe032c738adcc6bbf; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xfffe000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0001000100020001; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000fffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0001; +- *((unsigned long*)& __m256i_result[2]) = 0x000000003a099512; +- *((unsigned long*)& __m256i_result[1]) = 0x280ac9da313763f5; +- *((unsigned long*)& __m256i_result[0]) = 0xe032c738adcc6bbf; +- __m256i_out = 
__lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffe000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100020001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff000000010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000095120000; +- *((unsigned long*)& __m256i_op1[1]) = 0xc9da000063f50000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc7387fff6bbfffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff00000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffc81aca; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003a0a9512; +- *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313863f4; +- *((unsigned long*)& __m256i_op0[0]) = 0xe032c739adcc6bbd; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffe000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100020001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000fffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0xfffdffffffc81aca; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff3a0b9512; +- *((unsigned long*)& __m256i_result[1]) = 0x280bc9db313a63f5; +- *((unsigned long*)& __m256i_result[0]) = 0xe032c738adcb6bbb; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffc81aca; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003a0a9512; +- *((unsigned long*)& __m256i_op0[1]) = 0x280ac9da313863f4; +- *((unsigned long*)& __m256i_op0[0]) = 0xe032c739adcc6bbd; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x006b58e20e1e0e0f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x3672227c66a72cd7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000003594; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000082fb80e; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000c7e8; +- *((unsigned long*)& __m256i_result[0]) = 0x1ad6119c12def7bb; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x074132a240000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff00000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff000000010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000095120000; +- *((unsigned long*)& __m256i_op1[1]) = 0xc9da000063f50000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc7387fff6bbfffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xbf8000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xcf00000000000000; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffff000000010000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000095120000; +- *((unsigned long*)& __m256d_op0[1]) = 0xc9da000063f50000; +- *((unsigned long*)& __m256d_op0[0]) = 0xc7387fff6bbfffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xfffe000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256d_op1[1]) = 0x4001000100020000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff000000010000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000095120000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc9da000063f50000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc7387fff6bbfffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffdffffffc81aca; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff3a0b9512; +- *((unsigned long*)& __m256i_op1[1]) = 0x280bc9db313a63f5; +- *((unsigned long*)& __m256i_op1[0]) = 0xe032c738adcb6bbb; +- *((unsigned long*)& __m256i_result[3]) = 0xffff800001010400; +- *((unsigned long*)& __m256i_result[2]) = 0x000180009d120004; +- *((unsigned long*)& __m256i_result[1]) = 0xc9da080067f50020; +- *((unsigned long*)& __m256i_result[0]) = 0xc73c7fff6bbfffff; +- __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df8d7; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8b470f; +- *((unsigned long*)& __m256i_result[3]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffe06df0d7; +- *((unsigned long*)& __m256i_result[1]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffbe8b470f; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbf8000000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xcf00000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x003f00000000003f; +- *((unsigned long*)& __m128i_result[0]) = 0x003f000000000000; +- __m128i_out = __lsx_vsat_hu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff800000c3080002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x639c3fffb5dffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xb8c7800094400001; +- *((unsigned long*)& __m256i_result[3]) = 0x0063009c003f00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00b500df00ff00fe; +- *((unsigned long*)& __m256i_result[1]) = 0x00b800c700800000; +- *((unsigned long*)& __m256i_result[0]) = 0x0094004000000001; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvldi(-4080); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x639c3fffb5dffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xb8c7800094400001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0008000e000c000f; +- *((unsigned long*)& __m256i_result[0]) = 0x0009000100040001; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffff00000000ffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffe06df0d7; +- *((unsigned long*)& __m256d_op1[1]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffbe8b470f; +- *((unsigned long*)& __m256d_result[3]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256d_result[2]) = 0xffff00000000ffff; +- *((unsigned long*)& __m256d_result[1]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffbe8b470f; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000800080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc9d8080067f50020; +- *((unsigned long*)& __m256i_op0[0]) = 0xc70000020000c000; +- *((unsigned long*)& __m256i_result[3]) = 0xf000f00000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000f000f0000000; +- *((unsigned long*)& __m256i_result[1]) = 0xf0f008000ff5000f; +- *((unsigned long*)& __m256i_result[0]) = 0xf00000020000f000; +- __m256i_out = __lasx_xvsat_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; 
+- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xc090c40000000000; +- __m128d_out = __lsx_vflogb_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x92); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000800080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc9d8080067f50020; +- *((unsigned long*)& __m256i_op0[0]) = 0xc70000020000c000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe06df0d7; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8b470f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007ffffffff7ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x49d8080067f4f81f; +- __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff00000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8001000080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000800080000728; +- *((unsigned long*)& __m256i_op1[1]) = 0x8001800080008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x800080008000b8f1; +- *((unsigned long*)& __m256i_result[3]) = 0x8000ffff8000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff80008000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x800080008000b8f1; +- __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007ffffffff7ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x49d8080067f4f81f; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007f00fffff7ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xd8490849f467f867; +- __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xb7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x003f00000000003f; +- *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007ffffffff7ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x49d8080067f4f81f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7ffff7ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x080008000800f81f; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xa8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000ffff8000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff80008000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff00000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x074132a240000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000ffff8000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x06f880008000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x800080008000b8f1; +- __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xbf800000; +- *((int*)& __m128_op0[2]) = 0x0000ffff; +- *((int*)& __m128_op0[1]) = 0xcf000000; +- 
*((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x003f0000; +- *((int*)& __m128_op1[2]) = 0x0000003f; +- *((int*)& __m128_op1[1]) = 0x003f0000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df8d7; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffbe8b470f; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe06df0d7; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffbe8b470f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbf8000000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xcf00000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f00; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffe7fffeffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffd84900000849; +- *((unsigned long*)& __m256i_op0[0]) = 0x07fffc670800f086; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x311d9b643ec1fe01; +- *((unsigned long*)& __m256i_op1[0]) = 0x344ade20fe00fd01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007f00; +- *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x311d73ad3ec2064a; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007f00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x311d73ad3ec2064a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000001fc000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000c475ceb40000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fb0819280000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ffffffffffff7ff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe06df0d7; +- *((unsigned long*)& __m256i_op0[1]) = 0x988eb37e000fb33d; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffed95be394b1e; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000ffff8000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x06f880008000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x800080008000b8f1; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000ffff8000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x06f880008000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; +- *((unsigned long*)& __m256i_result[3]) = 0x8000010180000101; +- *((unsigned long*)& __m256i_result[2]) = 0xfa08800080000101; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x800080008000480f; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fc000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000c475ceb40000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fb0819280000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x074132a240000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000003a0200; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x0000ffff; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x0000ffff; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vmini_d(__m128i_op0,1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00ff00ff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0040000000ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0040000000000000; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x36); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f00; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffe7fffeffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffd84900000849; +- *((unsigned long*)& __m256i_op0[0]) = 0x07fffc670800f086; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x3922d40000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000c85221c0; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7ebfab800000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f20; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000009f0; +- __m256i_out = __lasx_xvmskltz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000ffff8000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x06f880008000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x800080008000b8f1; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000010180000101; +- *((unsigned long*)& __m256i_op1[2]) = 0xfa08800080000101; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x800080008000480f; +- *((unsigned long*)& __m256i_result[3]) = 0x0001010000010100; +- *((unsigned long*)& __m256i_result[2]) = 0x0101000000010100; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000000010100; +- __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f20; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000009f0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000001000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000800080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xc9d8080067f50020; +- *((unsigned long*)& __m256i_op1[0]) = 0xc70000020000c000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000010100; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000001000100; +- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned 
long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000800000000000; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0040000000ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0040000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0040000000ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0040000000000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvflogb_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0040000000ff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0040000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0020000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0020c00000000000; +- __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000001000100; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffbf7f7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xffffffffe651bfff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000010100; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000001000100; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f20; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000009f0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00010101; +- *((int*)& __m256_op1[6]) = 0x01010101; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00010100; +- *((int*)& __m256_op1[1]) = 0x00010000; +- *((int*)& __m256_op1[0]) = 0x01000100; +- *((int*)& __m256_result[7]) = 0x00010101; +- *((int*)& __m256_result[6]) = 0x01010101; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00010100; +- *((int*)& __m256_result[1]) = 0x00010000; +- *((int*)& __m256_result[0]) = 0x01000100; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 
0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00010101; +- *((int*)& __m256_op0[6]) = 0x01010101; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00010100; +- *((int*)& __m256_op0[1]) = 0x00010000; +- *((int*)& __m256_op0[0]) = 0x01000100; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xbf7f7fff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xe651bfff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0001010101010101; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000010100; +- *((unsigned long*)& __m256d_op1[0]) = 0x0001000001000100; +- *((unsigned long*)& 
__m256d_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[2]) = 0xffffffffbf7f7fff; +- *((unsigned long*)& __m256d_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[0]) = 0xffffffffe651bfff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffbf7f7fff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffe651bfff; +- __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0cc08723ff900001; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xcc9b89f2f6cef440; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0cc08723006fffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x3364760e09310bc0; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0cc08723ff900001; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xcc9b89f2f6cef440; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x7); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000020202; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000002020202; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000020200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xbf7f7fff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xe651bfff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0xffffffff; +- *((int*)& __m256_op2[2]) = 0xf328dfff; +- *((int*)& __m256_op2[1]) = 0x6651bfff; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffff328dfff; +- *((unsigned long*)& __m256i_op0[0]) = 0x6651bfff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff328dfff; +- *((unsigned long*)& __m256i_op1[0]) = 0x6651bfff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffe0001c3fe4001; +- *((unsigned long*)& __m256i_result[0]) = 0x8ffe800100000000; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffbf7f7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe651bfff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffbf7f7fff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffe651bfff; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0cc08723ff900001; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xcc9b89f2f6cef440; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xfffffff8; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xff800000; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xfffffff8; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x07070707; +- *((int*)& __m256_op0[5]) = 0x01020400; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00020100; +- *((int*)& __m256_op0[1]) = 0x07030200; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffff80; +- *((int*)& __m256_op1[6]) = 0xfefeff00; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x01000400; +- *((int*)& __m256_op1[3]) = 0xffffff80; +- *((int*)& __m256_op1[2]) = 0xfeff0000; +- *((int*)& 
__m256_op1[1]) = 0x02020080; +- *((int*)& __m256_op1[0]) = 0x5c800400; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0xffffffff; +- *((int*)& __m256_op2[2]) = 0xf328dfff; +- *((int*)& __m256_op2[1]) = 0x6651bfff; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffff80; +- *((int*)& __m256_result[6]) = 0x46867f79; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xf328dfff; +- *((int*)& __m256_result[1]) = 0x6651bfff; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffff8046867f79; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffff328dfff; +- *((unsigned long*)& __m256i_op0[0]) = 0x6651bfff80000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffff8046867f79; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffff328dfff; +- *((unsigned long*)& __m256i_result[0]) = 0x6651bfff80000000; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffff8046867f79; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffff328dfff; +- *((unsigned long*)& __m256i_op0[0]) = 0x6651bfff80000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ff80; +- *((unsigned long*)& __m256i_result[2]) = 0x0000468600007f79; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000f3280000dfff; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffff8046867f79; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff328dfff; +- *((unsigned long*)& __m256i_op1[0]) = 0x6651bfff80000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00010001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00010001; +- __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffbf7f7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe651bfff; +- *((unsigned long*)& __m256i_result[3]) = 0x1d1d1d1d1d1d1d1d; +- *((unsigned long*)& __m256i_result[2]) = 
0x1d1d1d1ddd9d9d1d; +- *((unsigned long*)& __m256i_result[1]) = 0x1d1d1d1d1d1d1d1d; +- *((unsigned long*)& __m256i_result[0]) = 0x1d1d1d1d046fdd1d; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffbf7f00007fff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffe651ffffbfff; +- *((int*)& __m256_result[7]) = 0x4f800000; +- *((int*)& __m256_result[6]) = 0x4f800000; +- *((int*)& __m256_result[5]) = 0x4f7fffbf; +- *((int*)& __m256_result[4]) = 0x46fffe00; +- *((int*)& __m256_result[3]) = 0x4f800000; +- *((int*)& __m256_result[2]) = 0x4f800000; +- *((int*)& __m256_result[1]) = 0x4f7fffe6; +- *((int*)& __m256_result[0]) = 0x4f7fffc0; +- __m256_out = __lasx_xvffint_s_wu(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ff80; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000468600007f79; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000f3280000dfff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000007070707; +- *((unsigned long*)& __m256i_op1[2]) = 0x0102040000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000020100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0703020000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000707; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000070300000000; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000707; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010200000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000070300000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ff80; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000468600007f79; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000f3280000dfff; +- *((unsigned long*)& __m256i_op1[3]) = 0x1d1d1d1d1d1d1d1d; +- *((unsigned long*)& __m256i_op1[2]) = 0x1d1d1d1ddd9d9d1d; +- *((unsigned long*)& __m256i_op1[1]) = 0x1d1d1d1d1d1d1d1d; +- *((unsigned long*)& __m256i_op1[0]) = 0x1d1d1d1d046fdd1d; +- *((unsigned long*)& __m256i_result[3]) = 0x00001d1d00001d1d; +- *((unsigned long*)& __m256i_result[2]) = 0x00001d1d00007f79; +- *((unsigned long*)& __m256i_result[1]) = 0x00001d1d00001d1d; +- *((unsigned long*)& __m256i_result[0]) = 0x00001d1d0000dd1d; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfe02fe02fee5fe22; +- *((unsigned long*)& __m256i_op1[0]) = 0xff49fe4200000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_op2[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffff00fe81; +- *((unsigned long*)& __m256i_result[0]) = 0xfe808d00eefffff8; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000007070707; +- *((unsigned long*)& __m256i_op0[2]) = 0x0102040000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000020100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0703020000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfe02fe02fee5fe22; +- *((unsigned long*)& __m256i_op1[0]) = 0xff49fe4200000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0003f8040002f607; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0002728b00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff328dfff; +- *((unsigned long*)& __m256i_op1[0]) = 0x6651bfff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0003f8040002f607; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffff328dfff; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffff328dfff; +- *((unsigned long*)& __m256i_op0[0]) = 0x6651bfff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0202020201010000; +- __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4f8000004f800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x4f7fffbf0000fe00; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000004f800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x4f7fffe64f7fffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfe02fe02fee5fe22; +- *((unsigned long*)& __m256i_op1[0]) = 0xff49fe4200000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffbf0000fe000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fe020000fe22; +- *((unsigned long*)& __m256i_result[0]) = 0xffe6fe42ffc00000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0002000200000022; +- *((unsigned long*)& __m256i_op0[0]) = 0x0049004200000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000007f00000022; +- *((unsigned long*)& __m256i_result[0]) = 0x0000007f00000000; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xf328dfff; +- *((int*)& __m256_op1[1]) = 0x6651bfff; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x0000ffff; +- *((int*)& __m256_op2[6]) = 0x0000ff80; +- *((int*)& __m256_op2[5]) = 0x00004686; +- *((int*)& __m256_op2[4]) = 0x00007f79; +- *((int*)& __m256_op2[3]) = 0x0000ffff; +- *((int*)& __m256_op2[2]) = 0x0000ffff; +- *((int*)& __m256_op2[1]) = 0x0000f328; +- *((int*)& __m256_op2[0]) = 0x0000dfff; +- *((int*)& __m256_result[7]) = 0x0000ffff; +- *((int*)& __m256_result[6]) = 0x0000ff80; +- *((int*)& __m256_result[5]) = 0x00004686; +- *((int*)& __m256_result[4]) = 0x00007f79; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0x0000ffff; +- *((int*)& __m256_result[1]) = 0x0000f328; +- *((int*)& __m256_result[0]) = 0x0000dfff; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xfe02fe02; +- *((int*)& __m256_op0[2]) = 0xfee5fe22; +- *((int*)& __m256_op0[1]) = 0xff49fe42; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x0000ffff; +- *((int*)& __m256_op1[6]) = 0x0000ff80; +- *((int*)& __m256_op1[5]) = 0x00004686; +- *((int*)& __m256_op1[4]) = 0x00007f79; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0x0000ffff; +- *((int*)& __m256_op1[1]) = 0x0000f328; +- *((int*)& __m256_op1[0]) = 0x0000dfff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ff80; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000468600007f79; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000f3280000dfff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xfe02fe02fee5fe22; +- *((unsigned long*)& __m256d_op1[0]) = 0xff49fe4200000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x00020001ffb6ffe0; +- *((unsigned long*)& __m256d_op2[0]) = 0x0049004200000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256d_result[0]) = 0xbf28b0686066be60; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202020201010000; +- int_op1 = 0x00000045eef14fe8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000eef14fe8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0202020201010000; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000eef14fe8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202020201010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000eef14fe8; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0202020201010000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xfe02fe02fee5fe22; +- *((unsigned long*)& __m256i_op2[0]) = 0xff49fe4200000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000eef14fe8; +- *((unsigned long*)& __m256i_result[1]) = 0xfffe928f1313c9cc; +- *((unsigned long*)& __m256i_result[0]) = 0x4244020201010000; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe02fe02fee5fe22; +- *((unsigned long*)& __m256i_op0[0]) = 0xff49fe4200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xbf28b0686066be60; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0xff49fe4200000000; +- __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0xbf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000007f00000022; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000007f00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000007f00000022; +- *((unsigned long*)& __m256i_result[0]) = 0x0000007f00000000; +- __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_wu(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00020001ffb6ffe0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0049004200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ff80; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000468600007f79; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000f3280000dfff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffb7; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004c00000000; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xbf28b0686066be60; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x40d74f979f99419f; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000007f00000022; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000007f00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000003f00000011; +- *((unsigned long*)& __m256i_result[0]) = 0x0000003f00000000; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000460086; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f0079; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000f30028; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000df00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xbf28b0686066be60; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffff00ffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff00ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ffffff00ff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000460086; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f0079; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000f30028; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000df00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x40d74f979f99419f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000022; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ff80; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000468600007f79; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000f3280000dfff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000022; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff80; +- *((unsigned long*)& __m256i_result[2]) = 0x0000468600008078; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffff328ffffe021; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f7f7f7f00007f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x3f28306860663e60; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x40d74f979f99419f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff01fd7fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fff7fff; +- __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x40d74f979f99419f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0xbf28b0686066be60; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000022; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_w(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ff80; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000468600007f79; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000f3280000dfff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fff01fd7fff7fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fff7fff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ff80; +- *((unsigned long*)& __m256i_result[2]) = 0x0000468600007f79; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000f3280000dfff; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- int_result = 0x000000000000ffff; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128d_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((int*)& __m128_result[3]) = 0x4b7f00ff; +- *((int*)& __m128_result[2]) = 0x4b7f00ff; +- *((int*)& __m128_result[1]) = 0x4b7f00ff; +- 
*((int*)& __m128_result[0]) = 0x4b7f00ff; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fff01fd7fff7fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00007ffe81fdfe03; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x40d74f979f99419f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x40d74f979f99419f; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000fff8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvsat_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff01fd7fff7fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007fff7fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff01fd7fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fff7fff; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x000000000000ff80; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffffffff; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x38); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000030000; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xc9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x00007ffe81fdfe03; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_du(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000ff80; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x60b53246; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x60b5054d; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffff0002; +- __m128i_out = __lsx_vadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x72); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffff0002; +- *((unsigned long*)& __m128i_op2[1]) = 0x54beed87bc3f2be1; +- *((unsigned long*)& __m128i_op2[0]) = 0x8024d8f6a494afcb; +- *((unsigned long*)& __m128i_result[1]) = 0xa8beed87bc3f2be1; +- *((unsigned long*)& __m128i_result[0]) = 0x0024d8f6a494006a; +- __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; +- *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; +- *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x5641127843c0d41e; +- *((unsigned long*)& __m128i_result[0]) = 0xfedb27095b6bff95; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5641127843c0d41e; +- *((unsigned long*)& __m128i_op0[0]) = 0xfedb27095b6bff95; +- *((unsigned long*)& __m128i_op1[1]) = 0xa8beed87bc3f2be1; +- *((unsigned long*)& __m128i_op1[0]) = 0x0024d8f6a494006a; +- *((unsigned long*)& __m128i_result[1]) = 0xff7fffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xff7fffffffffffff; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff7fffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xff7fffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffff7ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x64); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00001f41ffffbf00; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00001f41ffffbf00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvsll_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00001f41ffffbf00; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000400000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x2b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; +- *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; +- *((unsigned long*)& __m128i_result[1]) = 0xa8beed87bc3f2bd3; +- *((unsigned long*)& __m128i_result[0]) = 0x0024d8f6a494005c; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa8beed87bc3f2be1; +- *((unsigned long*)& __m128i_op0[0]) = 0x0024d8f6a494006a; +- *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001a8beed86; +- *((unsigned long*)& __m128i_result[0]) = 0x000000010024d8f5; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = 
__lsx_vbitrevi_w(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x54beed87bc3f2be1; +- *((unsigned long*)& __m128i_op0[0]) = 0x8024d8f6a494afcb; +- *((unsigned long*)& __m128i_result[1]) = 0x54feed87bc3f2be1; +- *((unsigned long*)& __m128i_result[0]) = 0x8064d8f6a494afcb; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x36); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x54feed87; +- *((int*)& __m128_op0[2]) = 0xbc3f2be1; +- *((int*)& __m128_op0[1]) = 0x8064d8f6; +- *((int*)& __m128_op0[0]) = 0xa494afcb; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0xff800000; +- __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x54feed87bc3f2be1; +- *((unsigned long*)& __m128i_op0[0]) = 0x8064d8f6a494afcb; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00001f41ffffbf00; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000040000fff8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfedb27095b6bff95; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x56411278; +- *((int*)& __m128_op0[2]) = 0x43c0d41e; +- *((int*)& __m128_op0[1]) = 0x0124d8f6; +- *((int*)& __m128_op0[0]) = 0xa494006b; +- *((int*)& __m128_op1[3]) = 0x7f800000; +- *((int*)& __m128_op1[2]) = 0xff800000; +- *((int*)& __m128_op1[1]) = 0xff800000; +- *((int*)& __m128_op1[0]) = 0xff800000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc2409edab019323f; +- *((unsigned long*)& __m128i_op1[0]) = 0x460f3b393ef4be3a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x460f3b393ef4be3a; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0100000100010001; +- __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x56411278; +- *((int*)& __m128_op0[2]) = 0x43c0d41e; +- *((int*)& __m128_op0[1]) = 0x0124d8f6; +- *((int*)& __m128_op0[0]) = 0xa494006b; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x460f3b393ef4be3a; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xc2409eda; +- *((int*)& __m128_op1[2]) = 0xb019323f; +- *((int*)& __m128_op1[1]) = 0x460f3b39; +- *((int*)& __m128_op1[0]) = 0x3ef4be3a; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x460f3b39; +- *((int*)& __m128_result[0]) = 0x3ef4be3a; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xc2409edab019323f; +- *((unsigned long*)& __m128d_op0[0]) = 0x460f3b393ef4be3a; +- *((unsigned long*)& __m128d_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128d_op1[0]) = 0x0100000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000040000fff8; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x000000040000fff8; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x000000040000fff8; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = 
__lasx_xvfmax_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000040000fff8; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x000000040000fff8; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000040000fff8; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00001f41ffffbf00; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x010180068080fff9; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitrev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000300; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& 
__m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000303; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff01fd7fff7fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007fff7fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fff01fd7fff7fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff01fd7fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fff7fff; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x7a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000040000fff8; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128d_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; +- __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x010180068080fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x7fff7fff; +- *((int*)& __m256_op0[4]) = 0x7fff7fff; +- *((int*)& __m256_op0[3]) = 0x7fff01fd; +- *((int*)& __m256_op0[2]) = 0x7fff7fff; +- *((int*)& __m256_op0[1]) = 0x00007fff; +- *((int*)& __m256_op0[0]) = 0x7fff7fff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfc2f3183ef7ffff7; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0xc5c5c5c5c5c5c5c5; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1515151515151515; +- *((unsigned long*)& __m256i_result[2]) = 0x1515151515151515; +- *((unsigned long*)& __m256i_result[1]) = 0x1515151515151515; +- *((unsigned long*)& __m256i_result[0]) = 0x1515151515151515; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1515151515151515; +- *((unsigned long*)& __m256i_op0[2]) = 0x1515151515151515; +- *((unsigned long*)& __m256i_op0[1]) = 0x1515151515151515; +- *((unsigned long*)& __m256i_op0[0]) = 0x1515151515151515; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00007ffe81fdfe03; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7ffe800000000000; +- __m256i_out = 
__lasx_xvssran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0xff3cff3cff3cff3c; +- *((unsigned long*)& __m256i_op1[2]) = 0xff3cff3cff3cff3c; +- *((unsigned long*)& __m256i_op1[1]) = 0xff3cff3cff3cff3c; +- *((unsigned long*)& __m256i_op1[0]) = 0xff3cff3cff3cff3c; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff3cff3cff3cff3c; +- *((unsigned long*)& __m256i_op1[2]) = 0xff3cff3cff3cff3c; +- *((unsigned long*)& __m256i_op1[1]) = 0xff3cff3cff3cff3c; +- *((unsigned long*)& __m256i_op1[0]) = 0xff3cff3cff3cff3c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x00007ffe81fdfe03; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 
0x80007ffe81fdfe03; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3fff3fff3fff3fff; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x7fff7fff; +- *((int*)& __m256_op0[4]) = 0x7fff7fff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x7fff7fff; +- *((int*)& __m256_op0[0]) = 0x7fff7fff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) 
= 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000001b3c4c0a5c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000007fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000007fff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x2a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1000000000000000; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x4f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000011; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000011; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0010000000000000; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x33); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000011; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000011; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000011; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000011; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0feff00000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0feff00000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000001; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000001; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000001; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandi_b(__m128i_op0,0x36); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0feff00000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0feff00000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff1001100100000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010100000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff1001100100000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010100000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfcc4004400400000; +- *((unsigned long*)& __m256i_result[2]) = 0x0040400000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfcc4004400400000; +- *((unsigned long*)& __m256i_result[0]) = 0x0040400000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000001; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x36); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_h(__m128i_op0,3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010100000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010100000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000010000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000010000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000010000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000010000000; +- __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& 
__m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000a0000000a; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000a0000000a; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffeb; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffeb; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffefffffffef; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffefffffffef; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffefffffffef; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x3d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff00ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff00ff00; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffefffffffef; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xbff0000000000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m256i_op0[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xdff8000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xdff8000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xdff8000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xdff8000000000000; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_d(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256d_op1[3]) = 0xdff8000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xdff8000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xdff8000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xdff8000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff00ff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff00ff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x01010101fe01fe01; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x01010101fe01fe01; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000040100000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000040100000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000040100000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000040100000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0080200000802000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0080200000802000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xbff0000000000000; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0080200000802000; +- *((unsigned long*)& __m256d_op0[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0080200000802000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0080200000802000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0080200000802000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x20); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x5d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1e18000000000000; +- __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x1e180000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x1e180000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x1e180000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x1e180000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00802000; +- *((int*)& __m256_op1[6]) = 0x00802000; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x00802000; +- *((int*)& __m256_op1[2]) = 0x00802000; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1e18000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1e1800001e180000; +- *((unsigned long*)& __m256i_result[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1e1800001e180000; +- *((unsigned long*)& __m256i_result[0]) = 0x1e18000000000000; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xfe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x1e18000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x1e18000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xbff0800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xbff0800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x1e1800001e180000; +- *((unsigned long*)& __m256d_op1[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x1e1800001e180000; +- *((unsigned long*)& __m256d_op1[0]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x1e1800001e180000; +- *((unsigned long*)& __m256d_op0[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x1e1800001e180000; +- 
*((unsigned long*)& __m256d_op0[0]) = 0x1e18000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x2f03988e2052463e; +- *((unsigned long*)& __m256d_result[2]) = 0x2f03988e1409212e; +- *((unsigned long*)& __m256d_result[1]) = 0x2f03988e2052463e; +- *((unsigned long*)& __m256d_result[0]) = 0x2f03988e1409212e; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0200020002000200; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000397541c58; +- *((unsigned long*)& __m256i_result[3]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_result[2]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_result[1]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_result[0]) = 0x97541c5897541c58; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0080200000802000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0080200000802000; +- *((unsigned long*)& __m256i_op0[0]) 
= 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00200020ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x1e0000001e000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00200020ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x1e0000001e000000; +- __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffe0ffe0ffe0ffe0; +- *((unsigned long*)& __m256i_result[2]) = 0xffe0ffe0ffe0ffe0; +- *((unsigned long*)& __m256i_result[1]) = 0xffe0ffe0ffe0ffe0; +- *((unsigned long*)& __m256i_result[0]) = 0xffe0ffe0ffe0ffe0; +- __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0080200000802000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080200000802000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00800080ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00800080ffffffff; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_h(__m128i_op0,6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe0ffe0ffe0ffe0; +- *((unsigned long*)& __m256i_op0[2]) = 0xffe0ffe0ffe0ffe0; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe0ffe0ffe0ffe0; +- *((unsigned long*)& __m256i_op0[0]) = 0xffe0ffe0ffe0ffe0; +- *((unsigned long*)& __m256i_op1[3]) = 0x1e1800001e180000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1e1800001e180000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x1e18000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000001e18; +- __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x70); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00800080ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00800080ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff001f; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000007fe268; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff001f; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007fe268; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x1e17ffffd0fc6772; +- *((unsigned long*)& __m256i_op1[2]) = 0x1e17ffffebf6ded2; +- *((unsigned long*)& __m256i_op1[1]) = 0x1e17ffffd0fc6772; +- *((unsigned long*)& __m256i_op1[0]) = 0x1e17ffffebf6ded2; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xe1e800002f03988d; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xe1e800002f03988d; +- __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffff001f; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x007fe268; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffff001f; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 
0x007fe268; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0xffff001f; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x007fe268; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0xffff001f; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x007fe268; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0xffff001f; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0xffff001f; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7a7cad6eca32ccc1; +- *((unsigned long*)& __m256i_op1[2]) = 0x7a7cad6efe69abd1; +- *((unsigned long*)& __m256i_op1[1]) = 0x7a7cad6eca32ccc1; +- *((unsigned long*)& __m256i_op1[0]) = 0x7a7cad6efe69abd1; +- *((unsigned long*)& __m256i_result[3]) = 0xff86005300360034; +- *((unsigned long*)& __m256i_result[2]) = 0xff86005300020055; +- *((unsigned long*)& __m256i_result[1]) = 0xff86005300360034; +- *((unsigned long*)& __m256i_result[0]) = 0xff86005300020055; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_op0[2]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_op0[1]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_op0[0]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffc00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffc00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffc00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffc00000000; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x22); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0xff800000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffe0; +- *((unsigned 
long*)& __m256i_op1[2]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_result[3]) = 0x000000010000ffe1; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000101001e18; +- *((unsigned long*)& __m256i_result[1]) = 0x000000010000ffe1; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000101001e18; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff001f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff001f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000000000ffe0; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000001e18; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff1f; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffeff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff1f; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffeff; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffdfe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffdfe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xe1e800002f03988d; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xe1e800002f03988d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff0f400001781cc4; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff0f400001781cc4; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; +- __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256d_op0[2]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256d_op0[1]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256d_op0[0]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256d_op1[3]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256d_op1[2]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256d_op1[1]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256d_op1[0]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_op0[2]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_op0[1]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_op0[0]) = 0x97541c5897541c58; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffffff88; +- *((unsigned long*)& __m256i_op0[2]) = 0x61e0000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffffff88; +- *((unsigned long*)& __m256i_op0[0]) = 0x61e0000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffefe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffefe; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01fe02; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01fe02; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x7ff80000; +- *((int*)& __m128_op1[0]) = 
0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff7fffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff8000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffff7fffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffff8000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000003fff; +- *((unsigned long*)& __m128i_result[0]) = 0x7ff8010000000001; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_result[1]) = 0x0005252800052528; +- *((unsigned long*)& __m128i_result[0]) = 0x0005252800052528; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_d(__m128i_op0,7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_result[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_result[0]) = 0x52527d7d52527d7d; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001010101; +- __m256i_out = __lasx_xvexth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x3f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000010000ffe1; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000101001e18; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000010000ffe1; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000101001e18; +- *((unsigned long*)& __m256i_op1[3]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op1[2]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op1[1]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op1[0]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000010000ffe1; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000101001e18; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000010000ffe1; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000101001e18; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000101001e18; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000101001e18; +- __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op0[2]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op0[1]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op0[0]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffeffffff88; +- *((unsigned long*)& __m256i_op1[2]) = 0x61e0000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffeffffff88; +- *((unsigned long*)& __m256i_op1[0]) = 0x61e0000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0010ffc80010ff52; +- *((unsigned long*)& __m256i_result[2]) = 0xfff1ffca0011ffcb; +- *((unsigned long*)& __m256i_result[1]) = 0x0010ffc80010ff52; +- *((unsigned long*)& __m256i_result[0]) = 0xfff1ffca0011ffcb; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffefb; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffefb; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x67eee33567eee435; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x67eee33567eee435; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00e0000000e00000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010ffc80010ff52; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff1ffca0011ffcb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010ffc80010ff52; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff1ffca0011ffcb; +- *((unsigned long*)& __m256i_result[3]) = 0x0010bfc80010bf52; +- *((unsigned long*)& __m256i_result[2]) = 0xfff1bfca0011bfcb; +- *((unsigned long*)& __m256i_result[1]) = 0x0010bfc80010bf52; +- *((unsigned long*)& __m256i_result[0]) = 0xfff1bfca0011bfcb; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffff7fffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffff8000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000808081; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000808081; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000808081; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000808081; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op0[2]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op0[1]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op0[0]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op1[3]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op1[2]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op1[1]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_op1[0]) = 0x98111cca98111cca; +- *((unsigned long*)& __m256i_result[3]) = 0x0000399400003994; +- *((unsigned long*)& __m256i_result[2]) = 0x0000399400003994; +- *((unsigned long*)& __m256i_result[1]) = 0x0000399400003994; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000399400003994; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00e0000000e00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000e0000000e0; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00e0000000e00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000e0000000e0; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x01010101; +- *((int*)& __m256_op0[5]) = 0x55555501; +- *((int*)& __m256_op0[4]) = 0xfefefeab; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x01010101; +- *((int*)& __m256_op0[1]) = 0x55555501; +- *((int*)& __m256_op0[0]) = 0xfefefeab; +- *((int*)& __m256_op1[7]) = 0x00000105; +- *((int*)& __m256_op1[6]) = 0xfffffefb; +- *((int*)& __m256_op1[5]) = 0xffffff02; +- *((int*)& __m256_op1[4]) = 0x000000fe; +- *((int*)& __m256_op1[3]) = 0x00000105; +- *((int*)& __m256_op1[2]) = 0xfffffefb; +- *((int*)& __m256_op1[1]) = 0xffffff02; +- *((int*)& __m256_op1[0]) = 0x000000fe; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000fc00; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000fc00; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x01010101; +- *((int*)& __m256_op0[5]) = 0x55555501; +- *((int*)& __m256_op0[4]) = 0xfefefeab; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x01010101; +- *((int*)& __m256_op0[1]) = 0x55555501; +- *((int*)& __m256_op0[0]) = 0xfefefeab; +- *((int*)& __m256_op1[7]) = 0x0010bfc8; +- *((int*)& __m256_op1[6]) = 0x0010bf52; +- *((int*)& __m256_op1[5]) = 0xfff1bfca; +- *((int*)& __m256_op1[4]) = 0x0011bfcb; +- *((int*)& __m256_op1[3]) = 0x0010bfc8; +- *((int*)& __m256_op1[2]) = 0x0010bf52; +- *((int*)& __m256_op1[1]) = 0xfff1bfca; +- *((int*)& __m256_op1[0]) = 0x0011bfcb; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x1414141414141415; +- *((unsigned long*)& __m128i_result[0]) = 0x1414141414141415; +- __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff1f; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff1f; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0010ffc80010ff52; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff1ffca0011ffcb; +- *((unsigned long*)& __m256i_op1[1]) = 0x0010ffc80010ff52; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff1ffca0011ffcb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfff1ffca0011feca; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfff1ffca0011feca; +- __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010bfc80010bf52; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff1bfca0011bfcb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010bfc80010bf52; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff1bfca0011bfcb; +- *((unsigned long*)& __m256i_result[3]) = 0xf5f5bfc8f5f5bff5; +- *((unsigned long*)& __m256i_result[2]) = 0xf5f1bfcaf5f5bfcb; +- *((unsigned long*)& __m256i_result[1]) = 0xf5f5bfc8f5f5bff5; +- *((unsigned long*)& __m256i_result[0]) = 0xf5f1bfcaf5f5bfcb; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefb; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffefb; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; +- int_op1 = 0x0000000059815d00; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000399400003994; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000399400003994; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000399400003994; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000399400003994; +- *((unsigned long*)& __m256i_result[3]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000fff00000fff; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_du(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128d_op1[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000052527d7d; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000052527d7d; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000000000fc00; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000fc00; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf5f5bfbaf5f5bfbe; +- *((unsigned long*)& __m256i_op0[2]) = 0xf5f0bfb8f5d8bfe8; +- *((unsigned long*)& __m256i_op0[1]) = 0xf5f5bfbaf5f5bfbe; +- *((unsigned long*)& __m256i_op0[0]) = 0xf5f0bfb8f5d8bfe8; +- *((unsigned long*)& __m256i_op1[3]) = 0xf5f5bfbaf5f5bfbe; +- *((unsigned long*)& __m256i_op1[2]) = 0xf5f0bfb8f5d8bfe8; +- *((unsigned long*)& __m256i_op1[1]) = 0xf5f5bfbaf5f5bfbe; +- *((unsigned long*)& __m256i_op1[0]) = 0xf5f0bfb8f5d8bfe8; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff5f5c; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x6c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000fff00000fff; +- 
*((unsigned long*)& __m256d_op1[2]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256d_op2[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256d_op2[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256d_op2[0]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff5f5c; +- __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff5f5c; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000105fffffefb; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffff02000000fe; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000105fffffefb; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffff02000000fe; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff1f; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffeff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff1f; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffeff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000105fffffefb; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff02000000fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000105fffffefb; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff02000000fe; +- *((unsigned long*)& __m256i_result[3]) = 0xf7ffffffffffff1f; +- *((unsigned long*)& __m256i_result[2]) = 0xbffffffffffffeff; +- *((unsigned long*)& __m256i_result[1]) = 
0xf7ffffffffffff1f; +- *((unsigned long*)& __m256i_result[0]) = 0xbffffffffffffeff; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7ffffffffffff1f; +- *((unsigned long*)& __m256i_op0[2]) = 0xbffffffffffffeff; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7ffffffffffff1f; +- *((unsigned long*)& __m256i_op0[0]) = 0xbffffffffffffeff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[3]) = 0xfff6fffefffe005b; +- *((unsigned long*)& __m256i_result[2]) = 0xffbefffefffe005a; +- *((unsigned long*)& __m256i_result[1]) = 0xfff6fffefffe005b; +- *((unsigned long*)& __m256i_result[0]) = 0xffbefffefffe005a; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000e0000000e0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000e0000000e0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000c400; +- __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00217f19ffde80e6; +- *((unsigned long*)& __m256i_op0[2]) = 0x00037f94fffc806b; +- *((unsigned long*)& __m256i_op0[1]) = 0x00217f19ffde80e6; +- *((unsigned long*)& __m256i_op0[0]) = 0x00037f94fffc806b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000fff00000fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[2]) = 0xff00ff0fff005f0f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[0]) = 0xff00ff0fff005f0f; +- __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xf7ffffffffffff1f; +- *((unsigned long*)& __m256i_op1[2]) = 0xbffffffffffffeff; +- *((unsigned long*)& __m256i_op1[1]) = 0xf7ffffffffffff1f; +- *((unsigned long*)& __m256i_op1[0]) = 0xbffffffffffffeff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x7); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff605a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff605a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0101000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101000000000000; +- __m256i_out 
= __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffff5f5c; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffff5f5c; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffff5f5c; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffff5f5c; +- *((int*)& __m256_op2[7]) = 0x0000000f; +- *((int*)& __m256_op2[6]) = 0x0000000f; +- *((int*)& __m256_op2[5]) = 0xff00ff0f; +- *((int*)& __m256_op2[4]) = 0xff005f0f; +- *((int*)& __m256_op2[3]) = 0x0000000f; +- *((int*)& __m256_op2[2]) = 0x0000000f; +- *((int*)& __m256_op2[1]) = 0xff00ff0f; +- *((int*)& __m256_op2[0]) = 0xff005f0f; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffff5f5c; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffff5f5c; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffff5f5c; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffff5f5c; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000c400; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x001000100010c410; +- __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op1[0]) = 0x001000100010c410; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007fff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff7fffffff; +- 
__m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; +- __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xfe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff605a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff605a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffebeb8; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffebeb8; +- __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffff5f5c; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffff605a; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffff5f5c; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffff605a; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffff5f5c; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffff605a; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffff5f5c; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffff605a; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256d_op0[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000000000fe; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffebeb8; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffebeb8; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0010001000100010; +- *((unsigned long*)& __m128i_op0[0]) = 0x001000100010c410; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00007fff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007fff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x37); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff605a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff605a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff605a; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff5f5c; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff605a; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x2d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 
0xffffffff; +- *((int*)& __m256_op0[4]) = 0x0060005a; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x0060005a; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0x5f13ccf5; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0x5f13ccf5; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); +- *((int*)& __m256_op0[7]) = 0xfffffff8; +- *((int*)& __m256_op0[6]) = 0xffffff08; +- *((int*)& __m256_op0[5]) = 0x00ff00f8; +- *((int*)& __m256_op0[4]) = 0x00ffcff8; +- *((int*)& __m256_op0[3]) = 0xfffffff8; +- *((int*)& __m256_op0[2]) = 0xffffff08; +- *((int*)& __m256_op0[1]) = 0x00ff00f8; +- *((int*)& __m256_op0[0]) = 0x00ffcff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000008000000080; +- __m256i_out = __lasx_xvfclass_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffff605a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffff605a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffff605a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffff605a; +- __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000000000000; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0101008000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0101008000000080; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_w(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0x3a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op2[0]) = 0x001000100010c410; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x64); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; +- __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((int*)& __m256_op0[7]) = 0xc5c5c5c4; +- *((int*)& __m256_op0[6]) = 0xc5c5c5c4; +- *((int*)& __m256_op0[5]) = 0x45c5c5c5; +- *((int*)& __m256_op0[4]) = 0x45c5c5c5; +- *((int*)& __m256_op0[3]) = 0xc5c5c5c4; +- *((int*)& __m256_op0[2]) = 0xc5c5c5c4; +- *((int*)& __m256_op0[1]) = 0x45c5c5c5; +- *((int*)& __m256_op0[0]) = 0x45c5c5c5; +- *((int*)& __m256_result[7]) = 0xc5c5c800; +- *((int*)& __m256_result[6]) = 0xc5c5c800; +- *((int*)& __m256_result[5]) = 0x45c5c800; +- *((int*)& __m256_result[4]) = 0x45c5c800; +- *((int*)& __m256_result[3]) = 0xc5c5c800; +- *((int*)& __m256_result[2]) = 0xc5c5c800; +- *((int*)& __m256_result[1]) = 0x45c5c800; +- *((int*)& __m256_result[0]) = 0x45c5c800; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x44); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x4370100000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x4370100000000000; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned 
long*)& __m256d_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000008000000080; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000008000000080; +- *((unsigned long*)& __m256d_op1[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256d_op1[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256d_op1[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256d_op1[0]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256d_result[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256d_result[0]) = 0x45c5c5c545c5c5c5; +- __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_result[3]) = 0xc5c4c5c5c5c5c5c5; +- *((unsigned long*)& __m256i_result[2]) = 0xc5c545c545c545c5; +- *((unsigned long*)& __m256i_result[1]) = 0xc5c4c5c5c5c5c5c5; +- *((unsigned long*)& __m256i_result[0]) = 0xc5c545c545c545c5; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x3d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffff01ffffff08; +- *((unsigned long*)& __m256i_op1[2]) = 0x43700f0100003008; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffff01ffffff08; +- *((unsigned long*)& __m256i_op1[0]) = 0x43700f0100003008; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000f8; +- *((unsigned long*)& __m256i_result[2]) = 0xbc8ff0ffffffcff8; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000f8; +- *((unsigned long*)& __m256i_result[0]) = 0xbc8ff0ffffffcff8; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc5c4c5c5c5c5c5c5; +- *((unsigned long*)& __m256i_op0[2]) = 0xc5c545c545c545c5; +- *((unsigned long*)& __m256i_op0[1]) = 0xc5c4c5c5c5c5c5c5; +- *((unsigned long*)& __m256i_op0[0]) = 0xc5c545c545c545c5; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000f8; +- *((unsigned long*)& __m256i_op1[2]) = 0xbc8ff0ffffffcff8; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000f8; +- *((unsigned long*)& __m256i_op1[0]) = 0xbc8ff0ffffffcff8; +- *((unsigned long*)& __m256i_result[3]) = 0xfcfcfcfcfc040404; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fbfffffc; +- *((unsigned long*)& __m256i_result[1]) = 0xfcfcfcfcfc040404; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fbfffffc; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000059815d00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000007942652524; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4265252400000000; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x000000ff; +- *((int*)& __m256_op0[6]) 
= 0x000000f8; +- *((int*)& __m256_op0[5]) = 0xbc8ff0ff; +- *((int*)& __m256_op0[4]) = 0xffffcff8; +- *((int*)& __m256_op0[3]) = 0x000000ff; +- *((int*)& __m256_op0[2]) = 0x000000f8; +- *((int*)& __m256_op0[1]) = 0xbc8ff0ff; +- *((int*)& __m256_op0[0]) = 0xffffcff8; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffff8ffffff08; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00f800ffcff8; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffff8ffffff08; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00f800ffcff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000003868686a20; +- *((unsigned long*)& __m256i_result[2]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000003868686a20; +- *((unsigned long*)& __m256i_result[0]) = 0x0045b8ae81bce1d8; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x21); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000003868686a20; +- *((unsigned long*)& __m256d_op1[2]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000003868686a20; +- *((unsigned long*)& __m256d_op1[0]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000003868686a20; +- *((unsigned long*)& __m256d_op1[2]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000003868686a20; +- *((unsigned long*)& __m256d_op1[0]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4ee85545068f3133; +- *((unsigned long*)& __m128i_op0[0]) = 0x870968c1f56bb3cd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x004e005500060031; +- *((unsigned long*)& __m128i_result[0]) = 0xff870068fff5ffb3; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x42652524; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000003900000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x004e005500060031; +- *((unsigned long*)& __m128i_op1[0]) = 0xff870068fff5ffb3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c5c5c5c5c5; +- *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c645c5c5c6; +- *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c5c5c5c5c5; +- *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c645c5c5c6; +- __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000003868686a20; +- *((unsigned long*)& __m256i_op0[2]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000003868686a20; +- *((unsigned long*)& __m256i_op0[0]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001a00000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000900000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001a00000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000900000000; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0027002a00030018; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f4300177f7a7f59; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0027002a00080018; +- *((unsigned long*)& __m128i_result[0]) = 0x7f4300177f7a7f59; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_op0[0]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_result[2]) = 0x45c5c5c545c5c5c5; +- *((unsigned long*)& __m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; +- *((unsigned long*)& __m256i_result[0]) = 0x45c5c5c545c5c5c5; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xb0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000003868686a20; +- *((unsigned long*)& __m256d_op0[2]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000003868686a20; +- *((unsigned long*)& __m256d_op0[0]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x47); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000003868686a20; +- *((unsigned long*)& __m256i_op0[2]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000003868686a20; +- *((unsigned long*)& __m256i_op0[0]) = 0x0045b8ae81bce1d8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00386a20b8aee1d8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00386a20b8aee1d8; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004e005500060031; +- *((unsigned long*)& __m128i_op0[0]) = 0xff870068fff5ffb3; +- *((unsigned long*)& __m128i_op1[1]) = 0x004e005500060031; +- *((unsigned long*)& __m128i_op1[0]) = 0xff870068fff5ffb3; +- *((unsigned long*)& __m128i_result[1]) = 0x04e00060ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x04e00060ffffffff; +- __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x454c2996; +- *((int*)& __m128_op0[2]) = 0x0ffe354e; +- *((int*)& __m128_op0[1]) = 0x9e063f80; +- *((int*)& __m128_op0[0]) = 0x2742ba3e; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x42652524; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; +- __m256i_out = __lasx_xvclz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x04e00060ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x04e00060ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x04e00060ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x04e00060ffffffff; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op1[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4ee85545ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x870968c1f56bb3cd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x870968c1f56bb3cd; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001a00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001a00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x870968c1f56bb3cd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001a00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001a00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000001a; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000001a; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000007fff800000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op0[0]) = 0x52527d7d52527d7d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000007fff800000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2e2b34ca59fa4c88; +- *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_hu(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2e2b34ca59fa4c88; +- *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966; +- *((unsigned long*)& __m128i_result[1]) = 0x3e2b34ca59fa4c88; +- *((unsigned long*)& __m128i_result[0]) = 0x3b2c8aefd44be966; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000001a00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000900000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000001a00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000900000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; +- __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x04e00060ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x04e00060ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x007fffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x007fffffffffffff; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfe01fe01fe01fe01; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fe01fe01; +- __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_d(__m256i_op0,11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3e2b34ca59fa4c88; +- *((unsigned long*)& __m128i_op0[0]) = 0x3b2c8aefd44be966; +- *((unsigned long*)& __m128i_result[1]) = 0x0007658000115de0; +- *((unsigned long*)& __m128i_result[0]) = 0x001a8960001d2cc0; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_b(__m128i_op0,0x4); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_result[2]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_result[1]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_result[0]) = 0xe9e9e9e9e9e9e9e9; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808000008080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080000080800000; +- __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808000008080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080000080800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe01fe01fe01fe01; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fe01fe01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_op0[2]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_op0[1]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_op0[0]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_result[3]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& 
__m256i_result[2]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_result[1]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_result[0]) = 0xe9e9e9e9e9e9e9e9; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xf7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808000008080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080000080800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001010100010100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x2f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007658000115de0; +- *((unsigned long*)& __m128i_op0[0]) = 0x001a8960001d2cc0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4000400040004000; +- *((unsigned long*)& __m128i_result[0]) = 0x4000400040004000; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00007fffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00007fffffffffff; +- *((unsigned long*)& __m256i_op1[0]) 
= 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8001; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff8001; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8001; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff8001; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0xfffffff0ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff0ffff0000; +- __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0007658000115de0; +- *((unsigned long*)& __m128i_op1[0]) = 0x001a8960001d2cc0; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffffff; +- __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfe07e5fe; +- *((int*)& __m128_op0[2]) = 0xfefdddfe; +- *((int*)& __m128_op0[1]) = 0x00020100; +- *((int*)& __m128_op0[0]) = 0xfedd0c00; +- *((int*)& __m128_result[3]) = 0x7fc00000; +- *((int*)& __m128_result[2]) = 0x7fc00000; +- *((int*)& __m128_result[1]) = 0x1e801ffc; +- *((int*)& __m128_result[0]) = 0x7fc00000; +- __m128_out = __lsx_vfsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xef0179a47c793879; +- *((unsigned long*)& __m128d_op0[0]) = 0x9f9e7e3e9ea3ff41; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128d_op2[0]) = 0x1e801ffc7fc00000; +- *((unsigned long*)& __m128d_result[1]) = 0xffc000007fc00000; +- *((unsigned long*)& __m128d_result[0]) = 0x9e801ffc7fc00000; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff80fe; +- *((unsigned long*)& __m256i_op0[2]) = 0xd52aaaaa555555ab; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff80fe; +- *((unsigned long*)& __m256i_op0[0]) = 0xd52aaaaa555555ab; +- *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab; +- __m256i_out = __lasx_xvreplve0_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfe07e5fefefdddfe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00020100fedd0c00; +- *((unsigned long*)& __m128i_result[1]) = 0xff02ff1bff02ff23; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffffff02fff4; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffc0008001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffffc0008001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffc0008001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffc0008001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffffc0007fe9; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffffc0007fe9; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffffc0007fe9; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffffc0007fe9; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00003fe00ffe3fe0; +- __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op1[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op1[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op1[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op1[0]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab; +- __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007658000115de0; 
+- *((unsigned long*)& __m128i_op0[0]) = 0x001a8960001d2cc0; +- *((unsigned long*)& __m128i_op1[1]) = 0xffc000007fc00000; +- *((unsigned long*)& __m128i_op1[0]) = 0x9e801ffc7fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ffff0000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff00ff0000ff; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff0ffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff0ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000017ffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xff02ff1bff02ff23; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000ffffff02fff4; +- *((unsigned long*)& __m128i_result[1]) = 0xff02ff1bff02ff23; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00003fe00ffe3fe0; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff02ff1bff02ff23; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffff02fff4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010000; +- __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab; +- int_op1 = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[0]) = 0x555555ab555555ab; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[3]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_result[2]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_result[1]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_result[0]) = 0x005500550055ffab; +- __m256i_out = __lasx_xvexth_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000080008000; +- *((unsigned long*)& __m256i_result[0]) = 
0x000000007fff7fff; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op1[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op1[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op1[0]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000080008000; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000080008000; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; +- __m128i_out = __lsx_vmaxi_d(__m128i_op0,2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f0000007f000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f0000007f000000; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffff0ffff0000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffff0ffff0000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff02ff1bff02ff23; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffff02fff4; +- *((unsigned long*)& __m128i_op1[1]) = 0xff02ff1bff02ff23; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffff02fff4; +- *((unsigned long*)& __m128i_op2[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op2[0]) = 0x1e801ffc7fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x7e44bde9b842ff23; +- *((unsigned long*)& __m128i_result[0]) = 0x00011e80007edff8; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[2]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[1]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_op0[0]) = 0x555555ab555555ab; +- *((unsigned long*)& __m256i_result[3]) = 0x1555156a1555156a; +- *((unsigned long*)& __m256i_result[2]) = 0x1555156a1555156a; +- *((unsigned long*)& __m256i_result[1]) = 0x1555156a1555156a; +- *((unsigned long*)& __m256i_result[0]) = 0x1555156a1555156a; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff020000fff4; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc7fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00001ee100000000; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f0000007f000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f0000007f000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1555156a1555156a; +- *((unsigned long*)& __m256i_op1[2]) = 0x1555156a1555156a; +- *((unsigned long*)& __m256i_op1[1]) = 0x1555156a1555156a; +- *((unsigned long*)& __m256i_op1[0]) = 0x1555156a1555156a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7e44bde9b842ff23; +- *((unsigned long*)& __m128i_op1[0]) = 0x00011e80007edff8; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001ffffff; +- __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1e801ffc7fc00000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ed0008005e00a2; +- *((unsigned long*)& __m128i_op1[0]) = 0x007a007600150077; +- *((unsigned long*)& __m128i_result[1]) = 0x0003000000010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0007007f03fe0000; +- __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff020000fff4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ed0008005e00a2; +- *((unsigned long*)& __m128i_op0[0]) = 0x007a007600150077; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ed0008005e00a2; +- *((unsigned long*)& __m128i_op1[0]) = 0x007a007600150077; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = 
__lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fc0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1e801ffc00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff020000fff4; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fc0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1e801ffc00000000; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000ffff; +- *((int*)& __m256_op0[6]) = 0xc0008001; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0xc0008001; +- *((int*)& __m256_op0[3]) = 0x0000ffff; +- *((int*)& __m256_op0[2]) = 0xc0008001; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0xc0008001; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe; +- __m256i_out = __lasx_xvftint_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- int_op1 = 0x0000007942652524; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff2524ffffffff; +- __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op0[2]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op0[1]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op0[0]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_result[3]) = 0x0004000400040805; +- *((unsigned long*)& __m256i_result[2]) = 0x0004000400040805; +- *((unsigned long*)& __m256i_result[1]) = 0x0004000400040805; +- *((unsigned long*)& __m256i_result[0]) = 0x0004000400040805; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3ff0000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0ff8010000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0ff8010000000000; +- __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff020000fff4; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fc0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1e801ffc00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000080007f80800; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0004007c00fc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x3ff1808001020101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x3ff1808001020101; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0004007c00fc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x047c0404fc00fcfc; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x8a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xe17fe003; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0004007c00fc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000fc0000; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe07e5fefefdddfe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00020100fedd0c00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x0002010000fc000b; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000080007f80800; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000001000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x000000ff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x000000ff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x0000ffff; +- *((int*)& __m256_op1[6]) = 0xc0008001; +- *((int*)& __m256_op1[5]) = 0x0000ffff; +- *((int*)& __m256_op1[4]) = 0xc0008001; +- *((int*)& __m256_op1[3]) = 0x0000ffff; +- *((int*)& __m256_op1[2]) = 0xc0008001; +- *((int*)& __m256_op1[1]) = 0x0000ffff; +- *((int*)& __m256_op1[0]) = 0xc0008001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000080007f80800; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00047fff00007fff; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000b0000000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0005000501800005; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001fc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x3ff1808001020101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x3ff1808001020101; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000ff7f1080ef8; +- *((unsigned long*)& __m256i_op1[2]) = 0x0100000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000ff7f1080ef8; +- *((unsigned long*)& __m256i_op1[0]) = 0x0100000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x003ff18080010201; +- *((unsigned long*)& __m256i_result[2]) = 0x0100000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x003ff18080010201; +- *((unsigned long*)& __m256i_result[0]) = 0x0100000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op0[2]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op0[1]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op0[0]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op1[3]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op1[2]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op1[1]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_op1[0]) = 0x005500550055ffab; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x003ff18080010201; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x003ff18080010201; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000f18080010000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000f18080010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000b0000000b; +- *((unsigned long*)& __m128i_op1[0]) = 0x000201000000000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000201000000000b; +- __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000fffe00010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000fffe00010001; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0555550000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0555550000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_result[3]) = 0x0555550000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0555550000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[2]) = 0x3ff1808001020101; +- *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[0]) = 0x3ff1808001020101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x00550000ffab0001; +- *((unsigned long*)& __m256i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x00550000ffab0001; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000f18080010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000f18080010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000078c0c0008000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000078c0c0008000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8493941335f5cc0c; +- *((unsigned long*)& __m128i_op0[0]) = 0x625a7312befcb21e; +- *((unsigned long*)& __m128d_result[1]) = 0x43e092728266beba; +- *((unsigned long*)& __m128d_result[0]) = 0x43d8969cc4afbf2d; +- __m128d_out = __lsx_vffint_d_lu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00fc0000; +- *((int*)& __m128_op1[3]) = 0xfe07e5fe; +- *((int*)& __m128_op1[2]) = 0xfefdddfe; +- *((int*)& __m128_op1[1]) = 0x00020100; +- *((int*)& __m128_op1[0]) = 0xfedd0c00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- long_int_result = 0x0000000000000000; +- long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000fc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; +- __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe07e5fefefdddfe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00020100fedd0c00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0005000501800005; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfe07e5fefefdddfe; +- *((unsigned long*)& __m128i_result[0]) = 0x00020100fedd0008; +- __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8493941335f5cc0c; +- *((unsigned long*)& __m128i_op1[0]) = 0x625a7312befcb21e; +- *((unsigned long*)& __m128i_result[1]) = 0x8493941300000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000002befcb21e; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[2]) = 0x3ff1808001020101; +- *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[0]) = 0x3ff1808001020101; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0ff80100ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0ff80100ffffffff; +- __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000201000000000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000020100; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000fc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000201000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007fff8000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001008100000005; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x84939413; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000002; +- *((int*)& __m128_op0[0]) = 0xbefcb21e; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000017000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000017000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000017000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000017000000080; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000007fff8000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001008100000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0800080077ff8800; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0801088108000805; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x7fff8000; +- *((int*)& __m128_op0[1]) = 0x00010081; +- *((int*)& __m128_op0[0]) = 0x00000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000020000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; +- __m128i_out = __lsx_vfclass_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x01000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x01000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000f18080010000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000f18080010000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000017000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000017000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x43e092728266beba; +- *((unsigned long*)& __m128i_op1[0]) = 0x43d8969cc4afbf2d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_h(__m128i_op0,-11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x000000000000001e; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000001e; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000017000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000017000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001700080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001700080; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000020000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000100000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000080000; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000210011084; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001e1f; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x3d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000100000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_hu(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0x0000ffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0ff80100ffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0ff80100ffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000017000000080; +- *((unsigned long*)& __m256d_op1[2]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000017000000080; +- *((unsigned long*)& __m256d_op1[0]) = 0xc06500550055ffab; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000170; +- *((int*)& __m256_op0[6]) = 0x00000080; +- *((int*)& __m256_op0[5]) = 0xc0650055; +- *((int*)& __m256_op0[4]) = 0x0055ffab; +- *((int*)& __m256_op0[3]) = 0x00000170; +- *((int*)& __m256_op0[2]) = 0x00000080; +- *((int*)& __m256_op0[1]) = 0xc0650055; +- *((int*)& __m256_op0[0]) = 0x0055ffab; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& 
__m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001700080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001700080; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x4177000800000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x4177000800000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000210011084; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001700080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001700080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001700080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001700080; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffe90ffffff80; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffe90ffffff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff90ffffff80; +- 
__m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- long_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; +- __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff90ffffff80; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; +- __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000210011084; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff01ff70ff01ff80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff01ff70ff01ff80; +- __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff90ffffff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000006f0000007f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000006f0000007f; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000017fff9000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000210011084; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000007fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001001; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000feff01; +- *((unsigned long*)& __m128i_result[0]) = 0x00feff0100000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffff90; +- *((int*)& __m256_op0[4]) = 0xffffff80; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffff90; +- *((int*)& __m256_op0[0]) = 0xffffff80; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& 
__m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_result[2]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_result[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_result[0]) = 0xfff6fff6fff6fff6; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,-10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001ffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfrint_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0001ffff00000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m128d_result[1]) = 0x5ff6a0a40ea8f47c; +- *((unsigned long*)& __m128d_result[0]) = 0x5ff6a0a40e9da42a; +- __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvsrari_h(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x5ff6a0a40ea8f47c; +- *((unsigned long*)& __m128i_op1[0]) = 0x5ff6a0a40e9da42a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0001ffff00000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; +- __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrml_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xdb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000006f0000007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000006f0000007f; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ff90ff81; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff90ff81; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff90ff81; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff90ff81; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ff0000ff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x01fc020000fe0100; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffecffffffec; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op2[2]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op2[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op2[0]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffecffffffec; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5ff6a0a40ea8f47c; +- *((unsigned long*)& __m128i_op0[0]) = 0x5ff6a0a40e9da42a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_w(__m128i_op0,-11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000ff0000; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrml_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- 
__m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001ffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vflogb_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x7); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000ff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x4b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffff90ffffff81; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffff90ffffff81; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff90ff81; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff90ff81; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f; +- int_result = 0x000000000000007f; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x4); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x0000ffff; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x0000ffff; +- *((int*)& __m128_op1[0]) = 0x0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0000ffff; 
+- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x0000ffff; +- *((int*)& __m128_op0[0]) = 0x0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffff6ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffff6ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ff0000ff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x01fc020000fe0100; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffff6ff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffff6ff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000900ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000900ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_d(__m256i_op0,-9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ff0000ff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x01fc020000fe0100; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000003fc0003; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x56); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffff6ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffff6ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x28); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000f4012ceb; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000f4012ceb; +- __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000017fda829; +- __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000017f0a82; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000fb8000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000fb8000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000017f0a82; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000003f; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000017f0a82; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000f6ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000f6ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007fffff00000000; +- __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_result[1]) = 0x0040004000400040; +- *((unsigned long*)& __m128i_result[0]) = 0x0040004017fda869; +- __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x17fda829; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x14131211100f0e0d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0c0b0a0908070605; +- *((unsigned long*)& __m256i_op0[1]) = 0x14131211100f0e0d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0c0b0a0908070605; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0a09080706050403; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0a09080706050403; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000017fda829; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000001e5; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x5000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000f6ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000001; +- *((int*)& __m128_op0[2]) = 0xfffffffe; +- *((int*)& __m128_op0[1]) = 0x00000001; +- *((int*)& __m128_op0[0]) = 0xfffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_w(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_hu(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vslti_b(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0a09080706050403; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0a09080706050403; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0504840303028201; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0504840303028201; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0001fffe; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000003ffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000003ffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000003ffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000003ffffffffff; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x29); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffe6ffffffe6; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffe6ffffffe6; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x19); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0a09080706050403; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0a09080706050403; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0003000200000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0003000200000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000017fda829; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x5c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000055555501; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; +- *((unsigned long*)& __m128i_result[1]) = 0x0000005555555554; +- *((unsigned long*)& __m128i_result[0]) = 0x0000005555555554; +- __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff00ff7f00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x32); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001000f000e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fff1000ffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000f000e; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000ffffe; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000005555555554; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001000f000e; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000fff1000ffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002a55005501; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002a55000001; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vsllwil_d_w(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002a55005501; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000002a55000001; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x36280000; +- *((int*)& __m128_result[1]) = 0x42a00000; +- *((int*)& __m128_result[0]) = 0x42a02000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0xff00ff7f; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000f000e; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000ffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x003fffff00070007; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000007ffff; +- __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000036280000; +- *((unsigned long*)& __m128i_op1[0]) = 0x42a0000042a02000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280000; +- *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x9f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x2c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = 
__lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff7fffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0040000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x80808080; +- *((int*)& __m256_op0[6]) = 0x80808080; +- *((int*)& __m256_op0[5]) = 0x80808080; +- *((int*)& __m256_op0[4]) = 0x80808080; +- *((int*)& __m256_op0[3]) = 0x80808080; +- *((int*)& __m256_op0[2]) = 0x80808080; +- *((int*)& __m256_op0[1]) = 0x80808080; +- *((int*)& __m256_op0[0]) = 0x80808080; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x80000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000005555555554; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000005555555554; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out 
= __lasx_xvandi_b(__m256i_op0,0xe2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280001; +- *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000005555555554; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000005555555554; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000036280001; +- *((unsigned long*)& __m128i_result[0]) = 0x42a0000042a02001; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000036280001; +- *((unsigned long*)& __m128i_op0[0]) = 0x42a0000042a02001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000036280001; +- __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xe0000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xe0000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xe0000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xe0000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x80000000; +- *((int*)& __m256_op1[4]) = 0x80000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x80000000; +- *((int*)& __m256_op1[0]) = 0x80000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0xffffffff; +- 
*((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x004200a000200001; +- __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000001c; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001c; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0000000; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001c; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001c; +- *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x004200a000200000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x004200a000200000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfc00000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xfc00000000000000; 
+- *((unsigned long*)& __m256d_op0[1]) = 0xfc00000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xfc00000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200000; +- *((unsigned long*)& __m128i_result[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x004200a000200000; +- __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff7fff; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x004200a000200001; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x7fff00007fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x004200a000200000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffffff; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x004200a0; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x004200a0; +- *((int*)& __m128_op0[0]) = 0x00200001; +- *((int*)& __m128_op1[3]) = 0x004200a0; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x004200a0; +- *((int*)& __m128_op1[0]) = 0x00200000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff000000; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffe003c1f0077; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffff0074230438; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000000438; +- __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004200a000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x004200a000200001; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_w(__m128i_op0,7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; +- __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000efffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffeffffffff; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[3]) = 0x1fffffff1fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0383634303836343; +- *((unsigned long*)& __m256i_result[1]) = 0x1fffffff1fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0383634303836343; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[0]) = 0x1c1b1a191c1b1a19; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffeffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffeffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffeffffffff; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffff000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001000000; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x28); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xefffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000efffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xfffffffe; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000efffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000002; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000002; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x51); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000001000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1fffffff1fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0383634303836343; +- *((unsigned long*)& __m256i_op1[1]) = 0x1fffffff1fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0383634303836343; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001000000; +- __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffeff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffeff00000000; +- __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000401000000; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[3]) = 0xffe4ffe6ffe5ffe6; +- *((unsigned long*)& __m256i_result[2]) = 0xffe4ffe6ffe5ffe6; +- *((unsigned long*)& __m256i_result[1]) = 0xffe4ffe6ffe5ffe6; +- *((unsigned long*)& __m256i_result[0]) = 0xffe4ffe6ffe5ffe6; +- __m256i_out = 
__lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x68); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1fffffff1fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0383634303836343; +- *((unsigned long*)& __m256i_op0[1]) = 0x1fffffff1fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0383634303836343; +- *((unsigned long*)& __m256i_result[3]) = 0x0002ffff0002ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[1]) = 0x0002ffff0002ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000400000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000400000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000400000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000400000000; +- __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff1fffffff1; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_du(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x6c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffe4ffe6; +- *((int*)& __m256_op0[6]) = 0xffe5ffe6; +- *((int*)& __m256_op0[5]) = 0xffe4ffe6; +- *((int*)& __m256_op0[4]) = 0xffe5ffe6; +- *((int*)& __m256_op0[3]) = 0xffe4ffe6; +- *((int*)& __m256_op0[2]) = 0xffe5ffe6; +- *((int*)& __m256_op0[1]) = 0xffe4ffe6; +- *((int*)& __m256_op0[0]) = 0xffe5ffe6; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000401000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000402000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000402000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000402000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000402000000; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_h(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[3]) = 0x0036003200360032; +- *((unsigned long*)& __m256i_result[2]) = 0x0036003200360032; +- *((unsigned long*)& __m256i_result[1]) = 0x0036003200360032; +- *((unsigned long*)& __m256i_result[0]) = 0x0036003200360032; +- __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffff000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffff000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0xc4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0036003200360032; +- *((unsigned long*)& __m256i_op0[2]) = 0x0036003200360032; +- *((unsigned long*)& __m256i_op0[1]) = 0x0036003200360032; +- *((unsigned long*)& __m256i_op0[0]) = 0x0036003200360032; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x28); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- 
*((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((int*)& __m128_result[3]) = 0xffffe000; +- *((int*)& __m128_result[2]) = 0xffffe000; +- *((int*)& __m128_result[1]) = 0xffffe000; +- *((int*)& __m128_result[0]) = 0xffffe000; +- __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0002fffeffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0002fffeffff; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000900000009; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x99); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00007fff; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffff0002fffeffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffff0002fffeffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_result[0]) = 0x1c1b1a191c1b1a19; +- __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xd2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0xffffffffffffffff; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffe001ffffe001; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe001ffffe001; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op0[0]) = 0x1c1b1a191c1b1a19; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x1d1a1b181d1a1b18; +- *((unsigned long*)& __m256i_result[2]) = 0x9c9b9a999c9b9a99; +- *((unsigned long*)& __m256i_result[1]) = 0x1d1a1b181d1a1b18; +- *((unsigned long*)& __m256i_result[0]) = 0x9c9b9a999c9b9a99; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x03ff03ff03ff03ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03ff03ff03ff03ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x438ff81ff81ff820; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffint_d_l(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x438ff81ff81ff820; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x03ff03ff03ff03ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000043; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x78); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0202020202020202; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0202020202020202; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffe001ffffe001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe001ffffe001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2000200020002000; +- *((unsigned long*)& __m256i_result[2]) = 0x2000200020002000; +- *((unsigned long*)& __m256i_result[1]) = 0x2000200020002000; +- *((unsigned long*)& __m256i_result[0]) = 0x2000200020002000; +- __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 
0x0000000100000001; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0200020002000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0200020002000200; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x73); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0200020002000200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0200020002000200; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff02000200; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff02000200; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe00001ffe200; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff02000200; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffdfff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffdfff; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffdfff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffdfff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe00001ffe200; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffdfff; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffe001; +- *((int*)& __m128_op0[2]) = 0xffffe001; +- *((int*)& __m128_op0[1]) = 0xffffe001; +- *((int*)& __m128_op0[0]) = 0xffffe001; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffe000; +- *((int*)& __m128_op1[0]) = 0x01ffe200; +- *((int*)& __m128_op2[3]) = 0x04040383; +- *((int*)& __m128_op2[2]) = 0x83838404; +- *((int*)& __m128_op2[1]) = 0x04040383; +- *((int*)& __m128_op2[0]) = 0x83838404; +- *((int*)& __m128_result[3]) = 0xffffe001; +- *((int*)& __m128_result[2]) = 0xffffe001; +- *((int*)& __m128_result[1]) = 0xffffe001; +- *((int*)& __m128_result[0]) = 0xffffe001; +- __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x30); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffff00000000; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op2[1]) = 0x03ff03ff03ff03ff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x2000200020002000; +- *((unsigned long*)& __m256i_op0[2]) = 0x2000200020002000; +- *((unsigned long*)& __m256i_op0[1]) = 0x2000200020002000; +- *((unsigned long*)& __m256i_op0[0]) = 0x2000200020002000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007f000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fff0000; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000003fb000003fb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000003fb000003fb; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffff4fffffff4; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff4fffffff4; +- __m128i_out = __lsx_vmini_w(__m128i_op0,-12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000007f000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffdfff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffdfff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffe000; +- *((int*)& __m128_op1[0]) = 0x01ffe200; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; +- *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff80007fff; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; +- *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000cb4a; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000cb4a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000f909; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op1[1]) = 0xf000e001bf84df83; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff8e001ff84e703; +- *((unsigned long*)& __m128i_result[1]) = 0x14042382c3ffa481; +- *((unsigned long*)& __m128i_result[0]) = 0x040c238283ff9d01; +- __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op0[0]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op1[1]) = 0x0403cfcf01c1595e; +- *((unsigned long*)& __m128i_op1[0]) = 0x837cd5db43fc55d4; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = 
__lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf000e001bf84df83; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff8e001ff84e703; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ca354688; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff35cab978; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff35cab978; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff35cab978; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010035; +- __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ca354688; +- *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_result[1]) = 0x00040003ff83ff84; +- *((unsigned long*)& __m128i_result[0]) = 0x00040003ff4dffca; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f909; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0403cfcf01c1595e; +- *((unsigned long*)& __m128i_op0[0]) = 0x837cd5db43fc55d4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_op1[0]) = 0x0404038383838404; +- *((unsigned long*)& __m128i_result[1]) = 0x0007005200440062; +- *((unsigned long*)& __m128i_result[0]) = 0x0080005e007f00d8; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff8383ffff7d0d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000040400000383; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe000ffff1fff; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; +- *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be5579ebe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f909; +- *((unsigned long*)& __m128i_result[1]) = 0x0c03e17edd781b11; +- *((unsigned long*)& __m128i_result[0]) = 0x342caf9be55700b5; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ca354688; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op1[1]) = 0x00040003ff83ff84; +- *((unsigned long*)& __m128i_op1[0]) = 0x00040003ff4dffca; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000383ffff1fff; +- __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000383ffff1fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ca354688; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000038335ca2777; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000404; +- *((int*)& __m128_op1[2]) = 0x00000383; +- *((int*)& __m128_op1[1]) = 0xffffe000; +- *((int*)& __m128_op1[0]) = 0xffff1fff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x7f800000; +- *((int*)& __m256_op1[6]) = 0x7f800000; +- *((int*)& __m256_op1[5]) = 0x7f800000; +- *((int*)& __m256_op1[4]) = 0x7f800000; +- *((int*)& __m256_op1[3]) = 0x7f800000; +- *((int*)& __m256_op1[2]) = 0x7f800000; +- *((int*)& __m256_op1[1]) = 0x7f800000; +- *((int*)& __m256_op1[0]) = 0x7f800000; +- *((int*)& __m256_result[7]) = 0xff800000; +- *((int*)& __m256_result[6]) = 0xff800000; +- *((int*)& __m256_result[5]) = 0xff800000; +- *((int*)& __m256_result[4]) = 0xff800000; +- *((int*)& __m256_result[3]) = 0xff800000; +- *((int*)& __m256_result[2]) = 0xff800000; +- *((int*)& __m256_result[1]) = 0xff800000; +- *((int*)& __m256_result[0]) = 0xff800000; +- __m256_out 
= __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000038335ca2777; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000800800000; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000463fd2902d; +- *((unsigned long*)& __m128i_op0[0]) = 0x5ccd54bbfcac806c; +- unsigned_int_result = 0x00000000000000ac; +- unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000800800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000800800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000004000000000; +- __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00007ff000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007ff000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00007ff000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; +- *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be55700b5; +- *((unsigned long*)& __m128i_op1[1]) = 0x00040003ff83ff84; +- *((unsigned long*)& __m128i_op1[0]) = 0x00040003ff4dffca; +- *((unsigned long*)& __m128i_result[1]) = 0x0c07e181ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x3430af9effffffff; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); 
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000004000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000040400000383; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000007; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffc0ffff003f; +- __m128i_out = __lsx_vsrai_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0c07e181ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x3430af9effffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00007f8000007f80; +- *((unsigned long*)& __m256d_op0[2]) = 0x00007f8000007f80; +- *((unsigned long*)& __m256d_op0[1]) = 0x00007f8000007f80; +- *((unsigned long*)& __m256d_op0[0]) = 0x00007f8000007f80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00007f8000007f80; +- *((unsigned long*)& __m256i_op1[2]) = 0x00007f8000007f80; +- *((unsigned long*)& __m256i_op1[1]) = 0x00007f8000007f80; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007f8000007f80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000040000000400; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000040000000400; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,-15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; +- *((unsigned long*)& __m128i_op0[0]) = 0x342caf9be55700b5; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000383; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0c03e17edd781b11; +- *((unsigned long*)& __m128i_result[0]) = 0x342caf9bffff1fff; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xcc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000001; +- __m128i_out = __lsx_vexth_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0c03e17edd781b11; +- *((unsigned long*)& __m128i_op0[0]) = 0x342caf9bffff1fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000040000000400; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0c037fff342c7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000004000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffc000000000; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x34); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000040400000383; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000040400000383; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128d_op2[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128d_result[0]) = 0xffffe000ffff1fff; +- __m128d_out = 
__lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0x0028e0a1; +- *((int*)& __m128_op0[2]) = 0xa000a041; +- *((int*)& __m128_op0[1]) = 0x01000041; +- *((int*)& __m128_op0[0]) = 0x00010001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x01000001; +- *((int*)& __m128_op1[1]) = 0x00010001; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x01000001; +- *((int*)& __m128_op2[1]) = 0xffffe000; +- *((int*)& __m128_op2[0]) = 0xffff1fff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x01000001; +- *((int*)& __m128_result[1]) = 0xffffe000; +- *((int*)& __m128_result[0]) = 0xffff1fff; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000401000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100000004; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffc000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0000; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000040100; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000; +- __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004; +- *((int*)& __m128_result[3]) = 0x40800000; +- *((int*)& __m128_result[2]) = 0x4b800000; +- *((int*)& __m128_result[1]) = 0x47800080; +- *((int*)& __m128_result[0]) = 0x40800000; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000383; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000383; +- *((unsigned long*)& __m128i_result[0]) = 0xe400000003ffc001; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000401000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000401000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000001; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000383; +- *((unsigned long*)& __m128i_op0[0]) = 0xe400000003ffc001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128i_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe000ffff2382; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[2]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[0]) = 0x0005000500050005; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffe000ffff1fff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000090100000a; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe009ffff2008; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000040000000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000040100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe000ffff2382; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000040100; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_w(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op0[2]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op0[0]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[2]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[0]) = 0x0005000500050005; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00040100; +- *((int*)& __m128_op0[1]) = 0x00010001; +- *((int*)& __m128_op0[0]) = 0x00010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000040100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000384; +- *((unsigned long*)& __m128i_op1[0]) = 0xe3f0200004003ffd; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ff00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff00ff00ff00; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000007f00000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000401000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x00000000007f0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; +- *((unsigned long*)& __m128i_result[0]) = 0x0404040404000404; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000007f00000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000401000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000110000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000007f00000004; 
+- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000007f0000; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000007f0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000501000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000008; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040100; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010400100203; +- *((unsigned long*)& __m128i_result[0]) = 0x0103010301020109; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000007f00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[2]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[0]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000050005; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000007f00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000007f00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000000; +- __m128i_out = __lsx_vmaxi_d(__m128i_op0,-4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 
0x00007f00; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x01000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffffffc; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x3a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000050005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010400100203; +- *((unsigned long*)& __m128i_op0[0]) = 0x0103010301020109; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000110000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000007f00000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0202000402020202; +- *((unsigned long*)& __m128i_result[0]) = 0x0000200000010000; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0001000100000004; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000501000002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100000008; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000050005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000505; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000505; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& 
__m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000505; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x02020004; +- *((int*)& __m128_op0[2]) = 0x02020202; +- *((int*)& __m128_op0[1]) = 0x00002000; +- *((int*)& __m128_op0[0]) = 0x00010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000505; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x2001240128032403; +- *((unsigned long*)& __m128i_op1[0]) = 0x288b248c00010401; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffdfffefffff7ffe; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0008; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0008; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0800000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xfffefffe; +- *((int*)& __m128_op0[0]) = 0xfffffffc; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xfffefffe; +- *((int*)& __m128_op1[0]) = 0xfffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000505; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; +- __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000505; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffef000004ea; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffefffffffef; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffef000004ea; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffef000004ea; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffefffffffef; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffef000004ea; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffefffffffef; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000002020202; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000004fb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xef); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffeffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100010102; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000001; +- *((int*)& __m256_op0[4]) = 0x00010102; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000101; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- 
*((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0018796d; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffffffc; +- __m128i_out = __lsx_vmini_b(__m128i_op0,4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001010300010102; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000410041; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000002020202; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x5b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = 
__lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000081; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffffffc; 
+- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffcff; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000102; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000102; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000102; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010103; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffcff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xfffffeff; +- *((int*)& __m128_op0[2]) = 0xfffffeff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffcff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00fffefe; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 
0x00000000ffffffff; +- __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128d_result[1]) = 0x800000ff000000ff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x800000ff000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x800000ff080000ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000102; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000fffffffefe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000fffefe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000808080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfe02fe02; +- *((int*)& __m128_op0[2]) = 0xfe02fe02; +- *((int*)& __m128_op0[1]) = 0xfe02fe02; +- *((int*)& __m128_op0[0]) = 0xfe02fe02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; +- __m128i_out = __lsx_vfclass_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000000020000; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xff8000000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000800000000ffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x697eba2bedfa9c82; +- *((unsigned long*)& __m128i_op2[0]) = 0xd705c77a7025c899; +- *((unsigned long*)& __m128i_result[1]) = 0xffcb410000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffeb827ffffffff; +- __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000808080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000808; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000808080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080404040; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002000000020000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000000020000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000fffefe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0a0a0a0a0a0a0a0a; +- *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a0a0a0a0a; +- *((unsigned long*)& __m256i_result[1]) = 0x0a0a0a0a0a0a0a0a; +- *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a0a0a0a0a; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x697eba2bedfa9c82; +- *((unsigned long*)& __m128i_op0[0]) = 
0xd705c77a7025c899; +- unsigned_int_result = 0x000000000000edfa; +- unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000102; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffefd; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2700000000002727; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000002727; +- *((unsigned long*)& __m128i_op1[1]) = 0x697eba2bedfa9c82; +- *((unsigned long*)& __m128i_op1[0]) = 0xd705c77a7025c899; +- *((unsigned long*)& __m128i_result[1]) = 0xc9c00000000009c9; +- *((unsigned long*)& __m128i_result[0]) = 0x0013938000000000; +- __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_hu(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffcb410000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffeb827ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x697eba2bedfa9c82; +- *((unsigned long*)& __m128i_op0[0]) = 0xd705c77a7025c899; +- *((unsigned long*)& __m128i_result[1]) = 0xedfaedfaedfaedfa; +- *((unsigned long*)& __m128i_result[0]) = 0xedfaedfaedfaedfa; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m256i_op1[3]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fff7fff7fffdefd; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; +- __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x800000ff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ffffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000009; +- __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000009; +- *((unsigned long*)& __m128i_op1[1]) = 0x697eba2bedfa9c82; +- *((unsigned long*)& __m128i_op1[0]) = 0xd705c77a7025c899; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x03fdfffcfefe03fe; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010000800100008; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffbf4; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffc; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffff01; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6c6c6c6c6c6c6c6c; +- *((unsigned long*)& __m256i_result[2]) = 0x6c6c6c6c6c6c6c6c; +- *((unsigned long*)& __m256i_result[1]) = 0x6c6c6c6c6c6c6c6c; +- *((unsigned long*)& __m256i_result[0]) = 0x6c6c6c6c6c6c6c6c; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x6c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffbf4; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000308; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[3]) = 0x6c6c6c6c6c6c6c6c; +- *((unsigned long*)& __m256i_op1[2]) = 0x6c6c6c6c6c6c6c6c; +- *((unsigned long*)& __m256i_op1[1]) = 0x6c6c6c6c6c6c6c6c; +- *((unsigned long*)& __m256i_op1[0]) = 0x6c6c6c6c6c6c6c6c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000003c; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffbf4; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800c000; +- *((unsigned long*)& __m256i_result[2]) = 0xf800f800f800a000; +- *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800e000; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff0002fffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0002ff7e8286; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff0002fffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0002ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0202000002020202; +- *((unsigned long*)& __m256i_result[2]) = 0x0202000002010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0202000002020202; +- *((unsigned long*)& __m256i_result[0]) = 0x0202000002020000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000fff08; +- *((unsigned long*)& __m128i_op0[0]) = 
0x00000000000fff09; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff80ff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff80000000ffff; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffff80ff0010ff06; +- *((unsigned long*)& __m128i_result[0]) = 0x00007f01000eff0a; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff80000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000808; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xedfaedfaedfaedfa; +- *((unsigned long*)& __m128i_op0[0]) = 0xedfaedfaedfaedfa; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xedfaedfaedfaedfa; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff80ff0010ff06; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007f01000eff0a; +- *((unsigned long*)& __m128i_result[1]) = 0xffff80ff0010ff06; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000804000004141; +- *((unsigned long*)& __m256i_op0[1]) = 0x00017fff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007fff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; +- *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf800f800f800c000; +- *((unsigned long*)& __m256i_op1[2]) = 0xf800f800f800a000; +- *((unsigned long*)& __m256i_op1[1]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_op1[0]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0010ff06; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xedfaedfaedfaedfa; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xf6fd377cf705f680; +- *((unsigned long*)& __m128i_result[0]) = 0xc0000000bfff8000; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff80ff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff80000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000001fffe; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffff80ff0010ff06; +- *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xedfaedfaedfaedfa; +- *((unsigned long*)& __m128d_op1[0]) = 0xedfaedfaedfaedfa; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000300000003; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_d(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0202000002020202; +- *((unsigned long*)& __m256i_op0[2]) = 0x0202000002010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0202000002020202; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202000002020000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x01fe000000ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x01fe000001fe0000; +- __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x01fe000000ff00ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x01fe000001fe0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xcccccccc0000cccc; +- *((unsigned long*)& __m128i_result[0]) = 0xcccccccc0000cccc; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0x33); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000040000000; +- *((unsigned long*)& __m256i_result[2]) = 0x4000000010000010; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000040000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000040000010; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000001fffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f00107f04; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f0000fd7f0000fd; +- *((unsigned long*)& __m128i_result[1]) = 0x7e7e7e7eff0f7f04; +- *((unsigned long*)& __m128i_result[0]) = 0x7f0000fd7f01fffb; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000808; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffe0000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000fefc0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffe0000; +- __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000fffffefc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000fffffffe0; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000fffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000fffffffff; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x7b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; +- *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfff8080000004000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000080000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfff8080000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000022666621; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffdd9999da; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f00107f04; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f0000fd7f0000fd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000066621; +- *((unsigned long*)& __m128i_result[0]) = 0x01ff00085e9900ab; +- __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf800f800f800c000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf800f800f800a000; +- *((unsigned long*)& __m256i_op0[1]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0001000100010000; +- *((unsigned long*)& __m256i_op2[2]) = 0x020afefb08140000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[0]) = 0x0003fffc00060000; +- *((unsigned long*)& __m256i_result[3]) = 0xf800f7fff8ffc0ff; +- *((unsigned long*)& __m256i_result[2]) = 0xf8fff7fff7ffa000; +- *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800e000; +- *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800e000; +- __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000001f; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000300000003; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000002; +- *((int*)& __m128_op0[2]) = 0x00000002; +- *((int*)& __m128_op0[1]) = 0x00000003; +- *((int*)& __m128_op0[0]) = 0x00000003; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 
0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x3fc000003fc00000; +- *((unsigned long*)& __m128i_result[0]) = 0x3fc000003fc00000; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fc000003fc00000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3fc000003fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_w(__m128i_op0,1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x5eff0000; +- *((int*)& __m128_result[2]) = 0x5eff0000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fc000003fc00000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3fc000003fc00000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3fc000003fc00000; +- *((unsigned long*)& __m128i_result[0]) = 0x3fc000003fc00000; +- __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f00107f04; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f0000fd7f0000fd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,-5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffff00ffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffff00ffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x7f800000; +- *((int*)& __m128_op0[2]) = 0x7f800000; +- *((int*)& __m128_op0[1]) = 0x7f800000; +- *((int*)& __m128_op0[0]) = 0x7f800000; +- *((int*)& __m128_op1[3]) = 0x00000002; +- *((int*)& __m128_op1[2]) = 0x00000002; +- *((int*)& __m128_op1[1]) = 0x00000003; +- *((int*)& __m128_op1[0]) = 0x00000003; +- *((int*)& __m128_op2[3]) = 0x3fc00000; +- *((int*)& __m128_op2[2]) = 0x3fc00000; +- *((int*)& __m128_op2[1]) = 0x3fc00000; +- *((int*)& __m128_op2[0]) = 0x3fc00000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000800000008; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000020afefb1; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f350104f7ebffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000003fffc1; +- *((unsigned long*)& __m256i_op1[0]) = 0x005c0003fff9ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000fe6a021; +- *((unsigned long*)& __m256i_result[1]) = 0x2000000020000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000b8000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00feff0000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00feff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffff0000000000; +- __m128i_out = __lsx_vslti_b(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00010001; +- *((int*)& __m256_op0[6]) = 0x00010000; +- *((int*)& __m256_op0[5]) = 0x020afefb; +- *((int*)& __m256_op0[4]) = 0x08140000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x0003fffc; +- *((int*)& __m256_op0[0]) = 0x00060000; +- *((int*)& __m256_op1[7]) = 0x80000000; +- *((int*)& __m256_op1[6]) = 0x40000000; +- *((int*)& __m256_op1[5]) = 0x40000000; +- *((int*)& __m256_op1[4]) = 0x10000010; +- *((int*)& __m256_op1[3]) = 0x80000000; +- *((int*)& __m256_op1[2]) = 0x40000000; +- *((int*)& __m256_op1[1]) = 0x80000000; +- *((int*)& __m256_op1[0]) = 0x40000010; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x000000ff; +- *((int*)& __m256_op2[4]) = 0x0001ffff; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x0000ffff; +- *((int*)& __m256_op2[0]) = 0x00010000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80020000; +- *((int*)& __m256_result[5]) = 0x828aff0b; +- *((int*)& __m256_result[4]) = 0x8001ffff; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000002; +- *((int*)& __m256_result[1]) = 0x8000ffff; +- *((int*)& __m256_result[0]) = 0x800d0002; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000300000003; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffc0003fffa0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned 
long*)& __m256i_result[0]) = 0x01fb010201f900ff; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010000; +- *((unsigned long*)& __m256i_op0[2]) = 0x020afefb08140000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0003fffc00060000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff00ffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff0001ff02; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff020afefc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000003fefd; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xfffffffe; +- *((int*)& __m256_op0[5]) = 0xfffffffe; +- *((int*)& __m256_op0[4]) = 0xfffffefc; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xfffffffe; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xfffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0209fefb08140000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0003fffc00060000; +- *((unsigned long*)& __m256d_result[3]) = 0x6100000800060005; +- *((unsigned long*)& __m256d_result[2]) = 0x5ee1c073b800c916; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x5ff00007fff9fff3; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0001ff02; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff020afefc; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000003fefd; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffefffefff7fff7; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7ffffffbfffb; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x7f800000; +- *((int*)& __m128_op0[2]) = 0x7f800000; +- *((int*)& __m128_op0[1]) = 0x7f800000; +- *((int*)& __m128_op0[0]) = 0x7f800000; +- *((int*)& __m128_op1[3]) = 0x7f800000; +- *((int*)& __m128_op1[2]) = 0x7f800000; +- *((int*)& __m128_op1[1]) = 0x7f800000; +- *((int*)& __m128_op1[0]) = 0x7f800000; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op2[1]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007f7f80807f7f80; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f80000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; +- __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f80000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0701000007010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0701000000000000; +- __m128i_out = __lsx_vpcnt_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000020afefb1; +- *((unsigned long*)& __m256d_op0[2]) = 0x7f350104f7ebffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000003fffc1; +- *((unsigned long*)& __m256d_op0[0]) = 0x005c0003fff9ffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x0209fefb08140000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0003fffc00060000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffff80cb; +- *((int*)& __m256_op1[6]) = 0xfffffdf8; +- *((int*)& __m256_op1[5]) = 0x00000815; +- *((int*)& __m256_op1[4]) = 
0x00000104; +- *((int*)& __m256_op1[3]) = 0xffffffa4; +- *((int*)& __m256_op1[2]) = 0xfffffffd; +- *((int*)& __m256_op1[1]) = 0x00000007; +- *((int*)& __m256_op1[0]) = 0x00000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_du(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff80cbfffffdf8; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000081500000104; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffa4fffffffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000002; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff80cbfffffdf8; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffa4fffffffd; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; +- *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00080000000cc916; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000006fff3; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; +- *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ffff00ff000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff8080000004000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff8080000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; +- __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; +- *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; +- *((unsigned long*)& __m256i_result[3]) = 0x0000005f000000f0; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000f9; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000f3; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0001ff02; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff020afefc; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000003fefd; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff0001ff04; +- 
*((unsigned long*)& __m256i_result[2]) = 0xffffffff02a0fefc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000cfefd; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_w(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x807f7f8000ffff00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00feff00; +- *((unsigned long*)& __m128i_result[1]) = 0xffff00000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff0000ffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0001ff04; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff02a0fefc; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000cfefd; +- *((unsigned long*)& __m256i_op1[3]) = 0x6100000800060005; +- *((unsigned long*)& __m256i_op1[2]) = 0x5ee1c073b800c916; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5ff00007fff9fff3; +- *((unsigned long*)& __m256i_op2[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op2[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op2[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op2[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffbfefa; +- *((unsigned long*)& __m256i_result[2]) = 0xff1eff1902a0fea4; +- *((unsigned long*)& __m256i_result[1]) = 0xff10000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff10fff9ff13fd17; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00080000000cc916; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000006fff3; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00f8000000f41bfb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000fa0106; +- __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x56); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x807f7f8000ffff00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff00feff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0107070100080800; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080800070800; +- __m128i_out = __lsx_vpcnt_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; +- *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ffff00ff000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00080005c073c916; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000100000007fff3; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000000010000; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6100000800060005; +- *((unsigned long*)& __m256i_op0[2]) = 0x5ee1c073b800c916; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5ff00007fff9fff3; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0209fefb08140000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc00060000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000800000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000bf6e0000c916; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000030000fff3; +- __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f78787f00f7f700; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000f7f700f7f700; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000bf6e0000c916; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000030000fff3; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000800000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000bf6e0000c916; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000030000fff3; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xbea2e127; +- *((int*)& __m256_op1[6]) = 0xc046721f; +- *((int*)& __m256_op1[5]) = 0x1729c073; +- *((int*)& __m256_op1[4]) = 0x816edebe; +- *((int*)& __m256_op1[3]) = 0xde91f010; +- *((int*)& __m256_op1[2]) = 0x000006f9; +- *((int*)& __m256_op1[1]) = 0x5ef1f90e; +- *((int*)& __m256_op1[0]) = 0xfefaf30d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010102; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010201010204; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010102; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010102; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbea2e127c046721f; +- *((unsigned long*)& __m256i_op0[2]) = 0x1729c073816edebe; +- *((unsigned long*)& __m256i_op0[1]) = 0xde91f010000006f9; +- *((unsigned long*)& __m256i_op0[0]) = 0x5ef1f90efefaf30d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000060000108; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001060005; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000007fef0001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xbfa3e127c147721f; +- *((unsigned long*)& 
__m256i_result[2]) = 0x1729c173836edfbe; +- *((unsigned long*)& __m256i_result[1]) = 0xdf91f111808007fb; +- *((unsigned long*)& __m256i_result[0]) = 0x5ff1f90ffffbf30f; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffc500000002d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000034; +- *((unsigned long*)& __m256i_op1[3]) = 0xbfa3e127c147721f; +- *((unsigned long*)& __m256i_op1[2]) = 0x1729c173836edfbe; +- *((unsigned long*)& __m256i_op1[1]) = 0xdf91f111808007fb; +- *((unsigned long*)& __m256i_op1[0]) = 0x5ff1f90ffffbf30f; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ff280016; +- *((unsigned long*)& __m256i_result[2]) = 0xd193a30f94b9b7df; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000001001a; +- *((unsigned long*)& __m256i_result[0]) = 0xc88840fdf887fd87; +- __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000bea20000e127; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000c0460000721f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000de910000f010; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000006f9; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000bea20; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000c0460; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000de910; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x37); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000bf6e0000c916; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000030000fff3; +- *((unsigned long*)& __m256i_op1[3]) = 0x001175f10e4330e8; +- *((unsigned long*)& __m256i_op1[2]) = 0xff8f0842ff29211e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000e00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffefc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x01480000052801a2; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffdcff64; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbea2e127c046721f; +- *((unsigned long*)& __m256i_op0[2]) = 0x1729c073816edebe; +- *((unsigned long*)& __m256i_op0[1]) = 0xde91f010000006f9; +- *((unsigned long*)& __m256i_op0[0]) = 0x5ef1f90efefaf30d; +- *((unsigned long*)& __m256i_result[3]) = 0x515f93f023600fb9; +- *((unsigned long*)& __m256i_result[2]) = 0x948b39e0b7405f6f; +- *((unsigned long*)& __m256i_result[1]) = 0x48ef087800007c83; +- *((unsigned long*)& __m256i_result[0]) = 0x78af877c7d7f86f9; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010102; +- *((unsigned long*)& __m256d_op0[2]) = 0x0101010201010204; +- *((unsigned long*)& __m256d_op0[1]) = 0x0101010101010102; +- *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010102; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000e00ff00ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010201010204; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010102; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x515f93f0; +- *((int*)& __m256_op0[6]) = 0x23600fb9; +- *((int*)& __m256_op0[5]) = 0x948b39e0; +- *((int*)& __m256_op0[4]) = 0xb7405f6f; +- *((int*)& __m256_op0[3]) = 0x48ef0878; +- *((int*)& __m256_op0[2]) = 0x00007c83; +- *((int*)& __m256_op0[1]) = 0x78af877c; +- *((int*)& __m256_op0[0]) = 0x7d7f86f9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000df93f0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000077843; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvftintrmh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x001175f10e4330e8; +- *((unsigned long*)& __m256d_op0[2]) = 0xff8f0842ff29211e; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffff8d9ffa7103d; +- *((unsigned long*)& __m256d_op1[3]) = 0x001175f10e4330e8; +- *((unsigned long*)& __m256d_op1[2]) = 0xff8f0842ff29211e; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffff8d9ffa7103d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,-1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x01480000052801a2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffdcff64; +- *((unsigned long*)& __m256i_op1[3]) = 0xbea2e127c046721f; +- *((unsigned long*)& __m256i_op1[2]) = 0x1729c073816edebe; +- *((unsigned long*)& __m256i_op1[1]) = 0xde91f010000006f9; +- *((unsigned long*)& __m256i_op1[0]) = 0x5ef1f90efefaf30d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00170000028500de; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fd02f20d; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010203; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x01480000052801a2; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffdcff64; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203; +- *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000060000108; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001060005; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fef0001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001175f10e4330e8; +- *((unsigned long*)& __m256i_op0[2]) = 0xff8f0842ff29211e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffff8d9ffa7103d; +- *((unsigned long*)& __m256i_result[3]) = 0x001175f10e4330e8; +- *((unsigned long*)& __m256i_result[2]) = 0xff8f0842ff29211e; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffff8d9ffa7103d; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x39); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001175f10e4330e8; +- *((unsigned long*)& __m256i_op0[2]) = 0xff8f0842ff29211e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffff8d9ffa7103d; +- *((unsigned long*)& __m256i_result[3]) = 0x001151510a431048; +- *((unsigned long*)& __m256i_result[2]) = 0x5b0b08425b09011a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x5b5b58595b031019; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x5b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x01480000052801a2; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffdcff64; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x001175f10e4330e8; +- *((unsigned long*)& __m256i_op1[2]) = 0xff8f0842ff29211e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000df93f0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000077843; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000003800000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x001175f10e4330e8; +- *((unsigned long*)& 
__m256i_op1[2]) = 0xff8f0842ff29211e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffff8d9ffa7103d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010203; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffcfa; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; +- __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fff80000; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00070007; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff0007ffff; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff0000; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000fff80000; +- *((unsigned long*)& __m128d_result[1]) = 0x80000000fff8fff8; +- *((unsigned long*)& __m128d_result[0]) = 0x80000000fff80000; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffcfa; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffcfa; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff8fffffff8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff8fc000000; +- __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80000000fff8fff8; +- *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x60000108; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x01060005; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x7fef0001; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xfffffff8; +- *((int*)& __m256_op1[4]) = 0xfffffff8; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 
0xfffffff8; +- *((int*)& __m256_op1[0]) = 0xfc000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203; +- *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffcfa; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xfff8fff8; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xfff80000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0xfff8fff8; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0xfff80000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned 
long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x6d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffe2ffe2ffe2ffe2; +- *((unsigned long*)& __m128i_result[0]) = 0xffe2ffe2ffe2ffe2; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010100000102; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010100000102; +- __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffcfa; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xfffffff8fffffff8; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xfffffff8fc000000; +- *((unsigned long*)& __m256i_result[3]) = 
0xfafafafafafafafa; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000fefefe; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80000000fff8fff8; +- *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f800000fff8fff8; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f800000fff80000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x80000000fff80000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ff7f0000ff7f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ff7f0000ff7f; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfafafafafafafafa; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000fefefe; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010203; +- *((unsigned long*)& __m256d_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010100000102; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010100000102; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffefd; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffefd; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff80000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op1[0]) = 0x80000000fff80000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000004000; +- *((unsigned long*)& __m128i_result[0]) = 0xfff8004000000000; +- __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256d_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xc08f780000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256d_result[1]) = 0xc08f780000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvflogb_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfafafafafafafafa; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fefefe; +- *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8282828282828282; +- *((unsigned long*)& __m128i_result[0]) = 0x8282828282828282; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x82); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfafafafafafafafa; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fefefe; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xf9fbf9fbf9fbf9fb; +- *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[1]) = 0xfdfffdfffdfffdff; +- *((unsigned long*)& __m256i_result[0]) = 0xff01ff01fffffdff; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffefd; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffefd; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xc08f7800; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xfffffefd; +- *((int*)& __m256_op0[3]) = 0xc08f7800; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000101; +- *((int*)& __m256_op1[4]) = 0x00000102; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 
0x00000101; +- *((int*)& __m256_op1[0]) = 0x00000102; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000a0a08000; +- *((unsigned long*)& __m128i_op0[0]) = 0x5350a08000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffefd; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffefd; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fd; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f017f807f017d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x007f017f807f017f; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000017f0000017d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000017f0000017f; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00007dfd; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00007dfd; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_b(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000001; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000001; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000017f0000017d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000017f0000017f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000017f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000017f; +- __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000017f0000017d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000017f0000017f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000017f0000017d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000017f0000017f; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000004000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff8004000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000; +- __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x2e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000017f0000017d; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000017f0000017f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x000000000000017f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000017f; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffc002000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000; +- __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_result[2]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_result[0]) = 0x1717171717171717; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffc002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffc002000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_op0[2]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_op0[0]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff000607f7; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000010017e7d1; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff000607f7; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000001001807f1; +- *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_result[2]) = 0x000607f700000001; +- *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_result[0]) = 0x000607f700000001; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffc001fffffffff; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff000607f7; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000010017e7d1; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff000607f7; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000001001807f1; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000017f0000017d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000017f0000017f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000002e0000002e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000002e0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000002e0000002e; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000002e0000fffe; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000002e0000002e; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000002e0000ffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000002e0000002e; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000002e0000fffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000f7bc0001f7bd; +- *((unsigned long*)& __m256i_result[2]) = 0x0000f93b0000017c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000f7bc0001f7bd; +- *((unsigned long*)& __m256i_result[0]) = 0x0000f93b0000017b; +- __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffc001fffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000200000; +- *((unsigned long*)& __m128i_result[0]) = 0x001fff8004000000; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefdfffffefd; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((int*)& __m256_result[7]) = 0x4b808080; +- *((int*)& __m256_result[6]) = 0x4b808080; +- *((int*)& __m256_result[5]) = 0x4f800000; +- *((int*)& __m256_result[4]) = 0x4f7fffff; +- *((int*)& __m256_result[3]) = 0x4b808080; +- *((int*)& __m256_result[2]) = 0x4b808080; +- *((int*)& __m256_result[1]) = 0x4f800000; +- *((int*)& 
__m256_result[0]) = 0x4f800000; +- __m256_out = __lasx_xvffint_s_wu(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0000007f; +- *((int*)& __m128_op0[2]) = 0x0000007f; +- *((int*)& __m128_op0[1]) = 0x0000007f; +- *((int*)& __m128_op0[0]) = 0x0000007f; +- *((int*)& __m128_op1[3]) = 0x3ff00000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xfffc0020; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffc001f; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010202050120; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010102020202; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000f7bc0001f7bd; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000f93b0000017c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000f7bc0001f7bd; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000f93b0000017b; +- *((unsigned long*)& __m256i_result[3]) = 0xfff2f7bcfff2f7bd; +- *((unsigned long*)& __m256i_result[2]) = 0xfff2f93bfff2fff2; +- *((unsigned long*)& __m256i_result[1]) = 0xfff2f7bcfff2f7bd; +- *((unsigned long*)& __m256i_result[0]) = 0xfff2f93bfff2fff2; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010202050120; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010102020202; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_op0[2]) = 0x000607f700000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_op0[0]) = 0x000607f700000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffe81; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff8000; +- *((unsigned long*)& __m128i_result[0]) = 0x0010000200020002; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff2f7bcfff2f7bd; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff2f93bfff2fff2; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff2f7bcfff2f7bd; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff2f93bfff2fff2; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcfffc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffcfffc; +- __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefdfffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0xfffffffffffffefd; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x4); +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000100; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffefd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000100; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xfffffefdfffffefd; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000100; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffff7d80000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000100; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffe81; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe81; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0001ffff8002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010000400020004; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff20ff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc0020ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x07fff80000008000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000007ffe001; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00cf01fe01fe01fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x000301de01fe01fe; +- *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0f00000000000000; +- __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x17171717; +- *((int*)& __m256_op0[6]) = 0x17171717; +- *((int*)& __m256_op0[5]) = 0x000607f7; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x17171717; +- *((int*)& __m256_op0[2]) = 0x17171717; +- *((int*)& __m256_op0[1]) = 0x000607f7; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vclo_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00003ff000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fffc00000000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcf800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800; +- __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1717171717171717; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x000607f700000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_op0[0]) = 0x000607f700000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000002e0000002e; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000002e0000ffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000002e0000002e; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000002e0000fffe; +- *((unsigned long*)& __m256i_result[3]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_result[2]) = 0x000607f700000001; +- *((unsigned long*)& __m256i_result[1]) = 0x1717171717171717; +- *((unsigned long*)& __m256i_result[0]) = 0x000607f700000001; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00003ff000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000fffc00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000002e0000002e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000002e0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000002e0000002e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000002e0000fffe; +- *((unsigned long*)& __m256i_result[3]) = 
0x000000000000002e; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000002e; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000002e; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00001ff800000001; +- *((unsigned long*)& __m128i_result[0]) = 0x7ffe800e80000000; +- __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000307fffe72e800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00001ff800000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ffe800e80000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_wu(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00005555; +- *((int*)& __m256_op1[6]) = 0x00005555; +- *((int*)& __m256_op1[5]) = 0x000307ff; +- *((int*)& __m256_op1[4]) = 0xfe72e815; +- *((int*)& __m256_op1[3]) = 0x00005555; +- *((int*)& __m256_op1[2]) = 0x00005555; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000015; +- *((int*)& __m256_result[7]) = 0x00005555; +- *((int*)& __m256_result[6]) = 0x00005555; +- *((int*)& __m256_result[5]) = 0x000307ff; +- *((int*)& __m256_result[4]) = 0xfe72e815; +- *((int*)& __m256_result[3]) = 0x00005555; +- *((int*)& __m256_result[2]) = 0x00005555; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000015; +- __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00003ff000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fffc00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0000ffff; +- *((int*)& __m128_op0[2]) = 0x0000ffff; +- *((int*)& __m128_op0[1]) = 0x0000ffff; +- *((int*)& __m128_op0[0]) = 0x0000fffe; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00003fee; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000004; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000002; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff100fffc; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffdf100fffc; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff100fffc; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x21); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 
0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0x0001ffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_result[1]) = 0x0001ffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x30); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffcf800fffcf800; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_result[2]) = 0xfffcf800fffcf800; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x7fffffff; +- *((int*)& __m128_op0[2]) = 0x7fffffff; +- *((int*)& __m128_op0[1]) = 0x7fffffff; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffcf800fffcf800; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_result[3]) = 0x0008000800000003; +- *((unsigned long*)& __m256i_result[2]) = 0x0806050008060500; 
+- *((unsigned long*)& __m256i_result[1]) = 0x0008000800000003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001fffe00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001ffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_du(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x0000ffff; +- *((int*)& __m128_op0[2]) = 0x0000ffff; +- *((int*)& __m128_op0[1]) = 0x0000ffff; +- *((int*)& __m128_op0[0]) = 0x0000fffe; +- *((int*)& __m128_op1[3]) = 0x0000ffff; +- *((int*)& __m128_op1[2]) = 0x0000ffff; +- *((int*)& __m128_op1[1]) = 0x0000ffff; +- *((int*)& __m128_op1[0]) = 0x0000fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x16161616a16316b0; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x16161616a16316b0; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x7c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0xf9f8f9f8f9f9f900; +- *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9e0; +- *((unsigned long*)& __m256i_result[1]) = 0xf9f8f9f8f9f9f900; +- *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f900; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000a16316b0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000063636363; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000a1630000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- int_op1 = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000080000; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff010000fff9; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff19; +- *((unsigned long*)& __m256i_result[1]) = 0xff02ff020001fffa; +- *((unsigned long*)& __m256i_result[0]) = 0x000100010001fffa; +- __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000080000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000a16316b0; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000063636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x16161616a16316b0; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000a16316b0; +- *((unsigned long*)& __m128i_result[0]) = 0x16161616a16316b0; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xa7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe82fe0200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe82fe0200000000; +- *((unsigned long*)& __m128d_result[1]) = 0xc177d01fe0000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff01ff010000fff9; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff19; +- *((unsigned long*)& __m256i_op0[1]) = 0xff02ff020001fffa; +- *((unsigned long*)& __m256i_op0[0]) = 0x000100010001fffa; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0x00fe01ff0006ffcf; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000e62f8f; +- *((unsigned long*)& __m256i_result[1]) = 0x00fe02fe0006ffd6; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000006ffd6; +- __m256i_out = 
__lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff100fffc; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff100fffc; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[3]) = 0xff01ff010000fff9; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff19; +- *((unsigned long*)& __m256i_op1[1]) = 0xff02ff020001fffa; +- *((unsigned long*)& __m256i_op1[0]) = 0x000100010001fffa; +- *((unsigned long*)& __m256i_result[3]) = 0x807f807f00000380; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380; +- *((unsigned long*)& __m256i_result[1]) = 0xc03fc03f000001c0; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000001c0; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000a16316b0; +- *((unsigned long*)& __m128i_op1[0]) = 0x16161616a16316b0; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ffffa10016; +- *((unsigned long*)& __m128i_result[0]) = 0x01150115ffa10016; +- __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000a1630000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000a1630000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000a163000016b0; +- *((unsigned long*)& __m128i_result[1]) = 0x0303000103030001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000030300000303; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff7100fffc; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ffffa10016; +- *((unsigned long*)& __m128i_op1[0]) = 0x01150115ffa10016; +- *((unsigned long*)& __m128i_result[1]) = 0x000100fe000070a1; +- *((unsigned long*)& __m128i_result[0]) = 0x00000115ffffffa1; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000f9f900; +- *((unsigned long*)& __m256i_op0[2]) = 0x79f9f9f900f9f9e0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000f9f900; +- *((unsigned long*)& __m256i_op0[0]) = 0x79f9f9f900f9f900; +- *((unsigned long*)& __m256i_result[3]) = 0x00f9f90079f9f9f9; +- *((unsigned long*)& __m256i_result[2]) = 0x79f9f9f900000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00f9f90079f9f9f9; +- *((unsigned long*)& __m256i_result[0]) = 0x79f9f9f900000000; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x97); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000100fe000070a1; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000115ffffffa1; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe86ce7eb5e9ce950; +- *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; +- *((unsigned long*)& __m128i_result[0]) = 0xec68e3ef5a98ed54; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000008; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00080000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffe40; +- *((unsigned long*)& __m256i_op1[3]) = 0x00f9f90079f9f9f9; +- *((unsigned long*)& __m256i_op1[2]) = 0x79f9f9f900000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00f9f90079f9f9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0x79f9f9f900000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8c80; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe40; +- __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xf436f3f5; +- *((int*)& __m128_op0[0]) = 0x2f4ef4a8; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffe40; +- __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_w(__m128i_op0,1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffint_d_lu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0004000000040000; +- *((unsigned long*)& __m128i_result[0]) = 0x0004000000040000; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_result[0]) = 0xfff6fff6fff6fff6; +- __m128i_out = __lsx_vmini_h(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_d(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0004000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0004000000040000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0004000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0004000000040000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0100010001000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0100010001000000; +- __m256i_out = __lasx_xvslli_d(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0100010001000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0004000400040004; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x807f807f00000380; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007380; +- *((unsigned long*)& __m256i_op0[1]) = 0xc03fc03f000001c0; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001c0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_result[3]) = 0x807f807f00000380; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380; +- 
*((unsigned long*)& __m256i_result[1]) = 0xc03fc03f000001c0; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000001c0; +- __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100010001000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xf436f3f52f4ef4a8; +- *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00f9f90079f9f9f9; +- *((unsigned long*)& __m256i_op1[2]) = 0x79f9f9f900000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00f9f90079f9f9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0x79f9f9f900000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x2000200020002000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff80000000000000; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffc0; +- __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffe40; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000040004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400; +- __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff8000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff8000000000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf436f3f52f4ef4a8; +- *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xf4b6f3f52f4ef4a8; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xf436f3f5; +- *((int*)& __m128_op0[0]) = 0x2f4ef4a8; +- *((int*)& __m128_op1[3]) = 0xff800000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xff800000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0x2f4ef4a8; +- __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0004000400040004; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff8000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff8000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff8000000000; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001600000016; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001600000016; +- __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00800000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xf4b6f3f5; +- *((int*)& __m128_op0[0]) = 0x2f4ef4a8; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned 
long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007380; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000f1c00; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffbfffc; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001600000016; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001600000016; +- *((int*)& __m128_result[3]) = 0x41b00000; +- *((int*)& __m128_result[2]) = 0x41b00000; +- *((int*)& __m128_result[1]) = 0x41b00000; +- *((int*)& __m128_result[0]) = 0x41b00000; +- __m128_out = __lsx_vffint_s_wu(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff8000002f4ef4a8; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000f4a8; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; +- __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff0e400; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000fff0e400; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000007380; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000000f1c00; +- *((unsigned long*)& __m256d_op2[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op2[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256d_op2[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op2[0]) = 0x00000000fff0e400; +- *((unsigned long*)& __m256d_result[3]) = 0x80000000ffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0x80000000ffff8c80; +- *((unsigned long*)& __m256d_result[1]) = 0x80000000ffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0x80000000fff0e400; +- __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffe40; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0e400; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x000000ff; +- *((int*)& __m256_op0[3]) 
= 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000ff00; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_b(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242070db; +- *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa478; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; +- __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- 
+- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffe40; +- *((unsigned long*)& __m256i_op1[3]) = 0x80000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x80000000ffff8c80; +- *((unsigned long*)& __m256i_op1[1]) = 0x80000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x80000000fff0e400; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000f1a40; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xf4b6f3f52f4ef4a8; +- *((unsigned long*)& __m128i_result[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xf4b6f3f52f4ef4a8; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xf4b6f3f52f4ef4a8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff8c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0e400; +- *((unsigned long*)& __m256i_op1[3]) = 0x80000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x80000000ffff8c80; 
+- *((unsigned long*)& __m256i_op1[1]) = 0x80000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x80000000fff0e400; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ff01ff01; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff01c000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff01ff01; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000f1000000; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff00fff0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000007f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000007f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000007f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007f007f78; +- __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002ff5; +- *((unsigned long*)& __m128i_op0[0]) = 0xc2cf2471e9b7d7a4; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000027f5; +- *((unsigned long*)& __m128i_result[0]) = 0xc2cf2471e9b7d7a4; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff01ff01; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff01c000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff01ff01; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000f1000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000001341c4000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001000310000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000033e87ef1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000002e2100; +- __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff1739ffff48aa; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff2896ffff5b88; +- *((unsigned long*)& __m128i_result[1]) = 0x3f3f17393f3f3f3f; +- *((unsigned long*)& __m128i_result[0]) = 0x3f3f283f3f3f3f3f; +- __m128i_out = __lsx_vsat_bu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000033e87ef1; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x000000000033007e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000021; +- __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f7f; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f007f78; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000033007e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000021; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f7f00007fff; +- __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xff800000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xff800000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffff1739; +- *((int*)& __m128_op1[2]) = 0xffff48aa; +- *((int*)& __m128_op1[1]) = 0xffff2896; +- *((int*)& __m128_op1[0]) = 0xffff5b88; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007f7f; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f007f78; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f00007f7f0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7f00fffb7f78fffc; +- __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001341c4000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001000310000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000033e87ef1; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000002e2100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000011c00; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000e8f1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000103100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000002e00; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 
0x00007f7f00000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00007f7f00007fff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000000f1a40; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x2757de72; +- *((int*)& __m128_op0[2]) = 0x33d771a3; +- *((int*)& __m128_op0[1]) = 0x166891d5; +- *((int*)& __m128_op0[0]) = 0x1e8b7eff; +- *((int*)& __m128_op1[3]) = 0x2757de72; +- *((int*)& __m128_op1[2]) = 0x33d771a3; +- *((int*)& __m128_op1[1]) = 0x166891d5; +- *((int*)& __m128_op1[0]) = 0x1e8b7eff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001341c4000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001000310000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00007f7f00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f00007fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000007f00340040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000007f000000ff; +- __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000033e87ef1; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x80008000b3e8fef1; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x80008000802ea100; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) 
= 0x0000000000000002; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xf4b6f3f52f4ef4a8; +- *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7ff77fff7ff7; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7ff77fff7ff7; +- __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb; +- *((unsigned long*)& __m128i_op0[0]) = 0x6a1a3fbb3c90260e; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x195f307a5d04acbb; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; +- *((unsigned long*)& __m128i_op1[1]) = 0x195f307a5d04acbb; +- *((unsigned long*)& __m128i_op1[0]) = 0x6a1a3fbb3c90260e; +- *((unsigned long*)& __m128i_result[1]) = 0x19df307a5d04acbb; +- *((unsigned long*)& __m128i_result[0]) = 0x5ed032b06bde1ab6; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfff5fff4002ffff5; +- __m128i_out = __lsx_vsrari_h(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ff77fff7ff7; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7ff77fff7ff7; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000022; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000022; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x3e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x19df307a5d04acbb; +- *((unsigned long*)& __m128i_op0[0]) = 0x5ed032b06bde1ab6; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x19de307a5d04acba; +- *((unsigned long*)& __m128i_result[0]) = 0x5ed032b06bde1ab6; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000012e2110; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff5fff4002ffff5; +- *((unsigned long*)& __m128i_op1[1]) = 0xaa858644fb8b3d49; +- *((unsigned long*)& __m128i_op1[0]) = 0x18499e2cee2cc251; +- *((unsigned long*)& __m128i_result[1]) = 0x8644000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xaed495f03343a685; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000012e2110; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_result[2]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x80008000b3e8fef1; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x80008000802ea100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000012e2110; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x012e2110012e2110; +- __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xfffffffe; +- *((int*)& __m128_op0[0]) = 0xbe6ed565; +- *((int*)& __m128_op1[3]) = 0x195f307a; +- *((int*)& __m128_op1[2]) = 0x5d04acbb; +- *((int*)& __m128_op1[1]) = 0x6a1a3fbb; +- *((int*)& __m128_op1[0]) = 0x3c90260e; +- *((int*)& __m128_op2[3]) = 0xffffffff; +- *((int*)& __m128_op2[2]) = 0xffffffff; +- *((int*)& __m128_op2[1]) = 0xfffffffe; +- *((int*)& __m128_op2[0]) = 0xbe6ed565; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0xfffffffe; +- *((int*)& __m128_result[0]) = 0x3e730941; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x012e2110012e2110; +- int_op1 = 0x00000000000000ac; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000ac; +- *((unsigned long*)& __m256i_result[0]) = 0x012e2110012e2110; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb; +- *((unsigned long*)& __m128i_op0[0]) = 0x6a1a3fbb3c90260e; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xe6a0cf86a2fb5345; +- *((unsigned long*)& __m128i_result[0]) = 0x95e5c045c36fd9f2; +- __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_op0[2]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_op0[1]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffebe6ed565; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffebe6ed565; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffbe6ed563; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00007f7f00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f00007fff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000007f00340040; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000007f000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020200008; +- *((unsigned long*)& __m256i_result[1]) = 0x0008010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007f7f00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f00007fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000040000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000040000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f7f00007fff; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x2a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x002e2100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0d1bffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 
0xd915e98e2d8df4d1; +- *((unsigned long*)& __m128i_result[1]) = 0xd0b1ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x9d519ee8d2d84f1d; +- __m128i_out = __lsx_vrotri_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020200008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0008010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101000001010000; +- __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8644000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xaed495f03343a685; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffbe6ed563; +- *((unsigned long*)& __m128i_result[1]) = 0x8644ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000fffe; +- __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0080000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0080000000000000; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000007f00340040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000007f000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbe6ed563; +- *((unsigned long*)& __m128i_op1[1]) = 0xd0b1ffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x9d519ee8d2d84f1d; +- *((unsigned long*)& __m128i_result[1]) = 0xfefd7f7f7f7f7f7e; +- *((unsigned long*)& __m128i_result[0]) = 0xdffdbffeba6f5543; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbe6ed563; +- *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd0b1ffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x9d519ee8d2d84f1d; +- *((unsigned long*)& __m128i_op1[1]) = 0x8644ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4a6d0000ffff0000; +- __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002e2100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000040002; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfefd7f7e7f7f7f7f; +- *((unsigned long*)& __m128i_op1[0]) = 0x9d519ee8d2d84f1d; +- *((unsigned long*)& __m128i_op2[1]) = 0x8644ffff0000ffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998; +- *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff8080; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefd7f7f7f7f7f7e; +- *((unsigned long*)& __m128i_op0[0]) = 0xdffdbffeba6f5543; +- *((unsigned long*)& __m128i_op1[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffffff000000ff; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x2020000020200000; +- *((unsigned long*)& __m256d_op0[2]) = 0x2020000020200000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0008000001010000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0101000001010000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ffffff000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ffffff000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff000000ff00; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0008000001010000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101000001010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101000001010000; +- __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0008000001010000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101000001010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_result[2]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_result[1]) = 0x0008000001010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101000001010000; +- __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffff000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ff00; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000040002; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x000000000000007f; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x85bd6b0e94d89998; +- *((unsigned long*)& __m128i_op0[0]) = 0xd83c8081ffff8080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; +- __m128i_out = __lsx_vmskltz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x000000000000007f; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000020001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000040002; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000040002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xefefefefefefefef; +- *((unsigned long*)& __m256i_result[2]) = 0xefefefefefefefef; +- *((unsigned long*)& __m256i_result[1]) = 0xefefefefefefef6e; +- *((unsigned long*)& __m256i_result[0]) = 0xeeeeeeeeeeeeeeee; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vflogb_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0xefefefef; +- *((int*)& __m256_op0[6]) = 0xefefefef; +- *((int*)& __m256_op0[5]) = 0xefefefef; 
+- *((int*)& __m256_op0[4]) = 0xefefefef; +- *((int*)& __m256_op0[3]) = 0xefefefef; +- *((int*)& __m256_op0[2]) = 0xefefef6e; +- *((int*)& __m256_op0[1]) = 0xeeeeeeee; +- *((int*)& __m256_op0[0]) = 0xeeeeeeee; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x85bd6b0e94d89998; +- *((unsigned long*)& __m128i_op0[0]) = 0xd83c8081ffff8080; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998; +- *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff8080; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_op1[2]) = 0x2020000020200000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0008000001010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101000001010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998; +- *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff8080; +- *((unsigned long*)& __m128i_result[1]) = 0x85bd6b0e94d89998; +- *((unsigned long*)& __m128i_result[0]) = 0xd83c8081ffff808f; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0018001800180018; +- *((unsigned long*)& __m128i_result[0]) = 0x0018001800180018; +- __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xefefefefefefefef; +- *((unsigned long*)& __m256i_op0[2]) = 0xefefefefefefefef; +- *((unsigned long*)& __m256i_op0[1]) = 0xefefefefefefef6e; +- *((unsigned long*)& __m256i_op0[0]) = 0xeeeeeeeeeeeeeeee; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x1010101010101012; +- *((unsigned long*)& __m256i_result[2]) = 0x1010101010101012; +- *((unsigned long*)& __m256i_result[1]) = 0x1010101010101093; +- *((unsigned long*)& __m256i_result[0]) = 0x1111111111111113; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfefd7f7f7f7f7f7e; +- *((unsigned long*)& __m128d_op0[0]) = 0xdffdbffeba6f5543; +- *((unsigned long*)& __m128d_op1[1]) = 0xfefd7f7f7f7f7f7e; +- *((unsigned long*)& __m128d_op1[0]) = 0xdffdbffeba6f5543; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff00fff0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff00fffffff0; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xfefd7f7f; +- *((int*)& __m128_op1[2]) = 0x7f7f7f7e; +- *((int*)& __m128_op1[1]) = 0xdffdbffe; +- *((int*)& __m128_op1[0]) = 0xba6f5543; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x7f7f7f7e; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0x10101010; +- *((int*)& __m256_op0[6]) = 0x10101012; +- *((int*)& __m256_op0[5]) = 0x10101010; +- *((int*)& __m256_op0[4]) = 0x10101012; +- *((int*)& __m256_op0[3]) = 0x10101010; +- *((int*)& __m256_op0[2]) = 0x10101093; +- *((int*)& __m256_op0[1]) = 0x11111111; +- *((int*)& __m256_op0[0]) = 0x11111113; +- *((int*)& __m256_result[7]) = 0xc2be0000; +- *((int*)& __m256_result[6]) = 0xc2be0000; +- *((int*)& __m256_result[5]) = 0xc2be0000; +- *((int*)& __m256_result[4]) = 0xc2be0000; +- *((int*)& 
__m256_result[3]) = 0xc2be0000; +- *((int*)& __m256_result[2]) = 0xc2be0000; +- *((int*)& __m256_result[1]) = 0xc2ba0000; +- *((int*)& __m256_result[0]) = 0xc2ba0000; +- __m256_out = __lasx_xvflogb_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe2ecd48adedc7c82; +- *((unsigned long*)& __m128i_op0[0]) = 0x25d666472b01d18d; +- *((unsigned long*)& __m128i_result[1]) = 0x0303020102020001; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000000000201; +- __m128i_out = __lsx_vclo_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0018001800180018; +- *((unsigned long*)& __m128i_op0[0]) = 0x0018001800180018; +- *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998; +- *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff808f; +- *((unsigned long*)& __m128i_result[1]) = 0xfff489b693120950; +- *((unsigned long*)& __m128i_result[0]) = 0xfffc45a851c40c18; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0018001800180018; +- *((unsigned long*)& __m128i_op1[0]) = 0x0018001800180018; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077; +- *((unsigned long*)& __m128i_op1[1]) = 0x85bd6b0e94d89998; +- *((unsigned long*)& __m128i_op1[0]) = 0xd83c8081ffff808f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xd82480697f678077; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0303020102020001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000201; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xd82480697f678077; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0301020100000004; +- __m128i_out = 
__lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x1010101010101012; +- *((unsigned long*)& __m256i_op1[2]) = 0x1010101010101012; +- *((unsigned long*)& __m256i_op1[1]) = 0x1010101010101093; +- *((unsigned long*)& __m256i_op1[0]) = 0x1111111111111113; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1010101110101011; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1111111211111112; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- long_int_result = 0x0000000000000000; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020001; +- *((unsigned long*)& __m256i_result[3]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[0]) = 0x1010101010121011; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x10101011; +- *((int*)& __m256_op1[4]) = 0x10101011; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x11111112; +- *((int*)& __m256_op1[0]) = 0x11111112; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfefd7f7f7f7f7f7e; +- *((unsigned long*)& __m128d_op0[0]) = 0xdffdbffeba6f5543; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18; +- *((unsigned long*)& __m128i_result[1]) = 0xe0d56a9774f3ea31; +- *((unsigned long*)& __m128i_result[0]) = 0xe0dd268932a5edf9; +- __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xd82480697f678077; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vslti_w(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff00fffffff0; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- 
__m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8080808080808081; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808080808081; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1010101110101011; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1111111211111112; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000004444; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x2e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe0d56a9774f3ea31; +- *((unsigned long*)& __m128i_op0[0]) = 0xe0dd268932a5edf9; +- *((unsigned long*)& __m128i_op1[1]) = 0xe0d56a9774f3ea31; +- *((unsigned long*)& __m128i_op1[0]) = 0xe0dd268932a5edf9; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xd8248069ffe78077; +- *((unsigned long*)& __m128i_result[1]) = 0xe0d56a9774f3ea31; +- *((unsigned long*)& __m128i_result[0]) = 0xbddaa86803e33c2a; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xd82480697f678077; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8080808080808081; +- *((unsigned long*)& __m256i_op1[1]) = 0x8080808080808081; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000808000008080; +- *((unsigned long*)& __m256i_result[2]) = 0x0000808000008081; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0; +- *((unsigned long*)& __m256i_result[3]) = 0x9f9f9f9f9f9f9f9f; +- *((unsigned long*)& __m256i_result[2]) = 0x9f9f9f9fffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x9f9f9f9f9f9f9f9f; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff9fffffffff; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x9f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe0d56a9774f3ea31; +- *((unsigned long*)& __m128i_op0[0]) = 0xbddaa86803e33c2a; +- *((unsigned long*)& __m128i_op1[1]) = 0xe0d56a9774f3ea31; +- *((unsigned long*)& __m128i_op1[0]) = 0xbddaa86803e33c2a; +- *((unsigned long*)& __m128i_result[1]) = 0xff0600d50e9ef518; +- *((unsigned long*)& __m128i_result[0]) = 0xffefffa8007c000f; +- __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000a; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000a; +- __m128i_out = __lsx_vmaxi_d(__m128i_op0,10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff489b693120950; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffc45a851c40c18; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ff00; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff489b693120950; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc45a851c40c18; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4811fda96793b23a; +- *((unsigned long*)& __m128i_op0[0]) = 0x8f10624016be82fd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfda9b23a624082fd; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0d0d0d0d0d0d0d0d; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xd8248069ffe78077; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xd8248069ffe78077; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xe31c86e90cda86f7; +- __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe31c86e90cda86f7; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000000e3; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x38); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff489b693120950; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc45a851c40c18; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffc45a851c40c18; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x48); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff0600d50e9ef518; +- *((unsigned long*)& __m128i_op0[0]) = 0xffefffa8007c000f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c63636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c00000000; +- __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c63636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000e3; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((int*)& 
__m128_result[3]) = 0x43630000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0xdc159371; +- *((int*)& __m128_result[0]) = 0x4f7fff00; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000808000008080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000808000008081; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000808000008080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000808000008081; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000081; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x68); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfda9b23a624082fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xd8248069; +- *((int*)& __m128_op0[0]) = 0x7f678077; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xd8248069; +- *((int*)& __m128_op1[0]) = 0x7f678077; +- *((int*)& __m128_result[3]) = 0x7fc00000; +- *((int*)& __m128_result[2]) = 0x7fc00000; +- *((int*)& __m128_result[1]) = 0x3f800000; +- *((int*)& __m128_result[0]) = 0x3f800000; +- __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x2002040404010420; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010180800101; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x2002040404010420; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101010180800101; +- *((unsigned long*)& __m128i_result[1]) = 0x2002040404010420; +- *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c80800101; +- __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) 
= 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000; +- *((unsigned long*)& __m128i_result[1]) = 0x03574e3a03574e3a; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x9c9c9c9c00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x03574e3a; +- *((int*)& __m128_op1[2]) = 0x03574e3a; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000; +- *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffff00fffffff0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffff00; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_result[2]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_result[1]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_result[0]) = 0x0202020202020202; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7da9b23a624082fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x03574e39e496cbc9; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001010000; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03574e3a62407e03; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7da9b23a624082fd; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0505050505050505; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000005050000; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03574e39e496cbc9; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x03574e38e496cbc9; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x05050505; +- *((int*)& __m128_op0[2]) = 0x05050505; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x05050000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x03574e38; +- *((int*)& __m128_op1[0]) = 0xe496cbc9; +- *((int*)& __m128_result[3]) = 0x05050505; +- *((int*)& __m128_result[2]) = 0x05050505; +- *((int*)& __m128_result[1]) = 0x03574e38; +- *((int*)& __m128_result[0]) = 0xe496cbc9; +- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x3e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000f; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000005050000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0505000005050505; +- *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0028280000282800; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03574e3b94f2ca31; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000001f807b89; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000005050000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0505000005050505; +- *((unsigned long*)& __m128i_result[1]) = 0x000d02540000007e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001400140014; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000005050000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0505000005050505; +- *((unsigned long*)& __m128i_op1[1]) = 0x000d02540000007e; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001400140014; +- *((unsigned long*)& __m128i_op2[1]) = 0x0505050505050505; +- *((unsigned long*)& __m128i_op2[0]) = 0x03574e38e496cbc9; +- *((unsigned long*)& __m128i_result[1]) = 0x0005000400000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0400001001150404; +- __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000006597cc3d; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; +- *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; +- *((unsigned long*)& __m128i_result[1]) = 0x000000006595cc1d; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0505050505050505; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000005050000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0028280000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0028280000282800; +- *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000282800; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0005000400000004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0400001001150404; +- *((unsigned long*)& __m128i_op1[1]) = 0x0005000400000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0400001001150404; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000800000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000800000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000800000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800000000; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0028280000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0028280000282800; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x7505853d654185f5; +- *((unsigned long*)& __m128i_op2[0]) = 0x01010000fefe0101; +- *((unsigned long*)& __m128i_result[1]) = 0x0028280000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x012927ffff272800; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcfb799f1; +- *((unsigned long*)& __m128i_op0[0]) = 0x0282800002828282; +- *((unsigned long*)& __m128i_op1[1]) = 0x5555001400005111; +- *((unsigned long*)& __m128i_op1[0]) = 0xffabbeab55110140; +- *((unsigned long*)& __m128i_result[1]) = 0xaaaaffebcfb748e0; +- *((unsigned long*)& __m128i_result[0]) = 0xfd293eab528e7ebe; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5555001400005111; +- *((unsigned long*)& __m128i_op0[0]) = 0xffabbeab55110140; +- *((unsigned long*)& __m128i_op1[1]) = 0x5555001400005111; +- *((unsigned long*)& __m128i_op1[0]) = 0xffabbeab55110140; +- *((unsigned long*)& __m128i_result[1]) = 0xaaaa00280000a222; +- *((unsigned long*)& __m128i_result[0]) = 0xfe567c56aa220280; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0028280000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x012927ffff272800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0028280000000000; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7505853d654185f5; +- *((unsigned long*)& __m128i_op0[0]) = 0x01010000fefe0101; +- *((unsigned long*)& __m128i_result[1]) = 0x7545c57d6541c5f5; +- *((unsigned long*)& __m128i_result[0]) = 0x41414040fefe4141; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x40); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x000d0254; +- *((int*)& __m128_op0[2]) = 0x0000007e; +- *((int*)& __m128_op0[1]) = 0x00000014; +- *((int*)& __m128_op0[0]) = 0x00140014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xaaaaffebcfb748e0; +- *((unsigned long*)& __m128i_op1[0]) = 0xfd293eab528e7ebe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0xffeb48e03eab7ebe; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xaaaaffebcfb748e0; +- *((unsigned long*)& __m128i_op1[0]) = 0xfd293eab528e7ebe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xaaaaffebcfb748e0; +- *((unsigned long*)& __m128i_op0[0]) = 0xfd293eab528e7ebe; +- *((unsigned long*)& __m128i_result[1]) = 0xf6e91c0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x51cfd7c000000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xffeb48e03eab7ebe; +- *((unsigned long*)& __m128i_result[1]) = 0xffc0fac01200f800; +- *((unsigned long*)& __m128i_result[0]) = 0x0f80eac01f80ef80; +- __m128i_out = 
__lsx_vsllwil_h_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000001fc1a568; +- *((unsigned long*)& __m128i_op0[0]) = 0x02693fe0e7beb077; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_d(__m128i_op0,-6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000001fc1a568; +- *((unsigned long*)& __m128i_op0[0]) = 0x02693fe0e7beb077; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000030000; +- *((unsigned long*)& __m128i_result[0]) = 0x0006000200000000; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcfb799f1; +- *((unsigned long*)& __m128i_op0[0]) = 0x0282800002828282; +- *((int*)& __m128_result[3]) = 0xffffe000; +- *((int*)& __m128_result[2]) = 0xffffe000; +- *((int*)& __m128_result[1]) = 0xc1f6e000; +- *((int*)& __m128_result[0]) = 0xbb3e2000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0xf6e91c00; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x51cfd7c0; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x880c91b8; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x2d1da85b; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x80008000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x80008000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x80008000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x80008000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_result[1]) = 0x7404443064403aec; +- *((unsigned long*)& __m128i_result[0]) = 0x0000d6eefefc0498; +- __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf6e91c0000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x51cfd7c000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffd000700000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0014fff500000000; +- __m128i_out = __lsx_vsrai_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0x7f800000; +- *((int*)& __m128_op0[1]) = 0x2d1da85b; 
+- *((int*)& __m128_op0[0]) = 0x7f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fffffff; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_op1[1]) = 0x7505443065413aed; +- *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7404443064403aec; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000d6eefefc0498; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff7f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x2d1da85b7f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x002d001dd6a8ee5b; +- *((unsigned long*)& __m128i_result[0]) = 0xfe7ffc8004009800; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_op1[1]) = 0x7505443065413aed; +- *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_result[1]) = 0xb71289fdfbea3f69; +- *((unsigned long*)& __m128i_result[0]) = 0x4e17c2ffb4851a40; +- __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xda4643d5301c4000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc1fc0d3bf55c4000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; +- *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000800000000000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x8000800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100010000; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff7f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x2d1da85b7f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7505853d654185f5; +- *((unsigned long*)& __m128i_op1[0]) = 0x01010000fefe0101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000013d; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x40); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffd000700000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0014fff500000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f03000780000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f15000a7f010101; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000013d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010001000030000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0006000200000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0006000200000000; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; +- __m256d_out = __lasx_xvflogb_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00100010; +- *((int*)& __m128_op0[2]) = 0x00030000; +- *((int*)& __m128_op0[1]) = 0x00060002; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfrint_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100010080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100010080; +- __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7505443065413aed; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000750500006541; +- *((unsigned long*)& __m128i_result[0]) = 0x00000100fffffefd; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0110000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0110000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0110000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0110000000000080; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0110000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0110000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0110000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0110000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0110000000000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0110000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0110000000000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0110000000000080; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0010001000030000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0006000200000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7505445465593af1; +- *((unsigned long*)& __m128i_op1[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000030000; +- *((unsigned long*)& __m128i_result[0]) = 0x0006000200000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfff0000000000080; +- __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x0000001a; +- *((int*)& __m128_op0[2]) = 
0xfffffff7; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001afffffff7; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000750500006541; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000100fffffefd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000080; +- __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; +- *((unsigned long*)& 
__m256i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x7f80780000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[1]) = 0x7f80780000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000002400180004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000024; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff00000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x7fffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7e00fe0000000000; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0010001000030000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00060001fffe8003; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f80780000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7f80780000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00001000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00001000; +- __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7505445465593af1; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100d6effefd0498; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_hu(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xf000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xf000000000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0x00001000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x00001000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1000000000000000; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001000; +- *((unsigned long*)& __m256i_op2[3]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xf001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; +- *((int*)& __m256_result[7]) = 0xc6000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0xc6000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = 
__lasx_xvfcvtl_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000024; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000024; +- __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffc0ff81000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000600000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffc0ff81000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xc600000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xc600000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000750500006541; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000100fffffefd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f80780000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7f80780000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000200010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00200010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c80780000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c80780000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f80780000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f80780000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1fe01e0000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x22); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x6b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffc0ff81000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff0ffe04000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1090918800000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1090918800000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c80780000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c80780000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x1c80780000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[1]) = 0x1c80780000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1fe01e0000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1fe01e0000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1fe01e0000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000400000004000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000400000204010; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1fe01e0100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x1fe01e0100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xf000f00000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xf000f00000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xf000f00000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xf000f00000000001; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000f0000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000f0000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); 
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xfffffff0ffe04000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001fc0000; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_w(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1fe01e0100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x1fe01e0100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x1fe01e0100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x1fe01e0100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000400000004000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000400000204010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000020000010200; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffff0ffe04000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xf000f00000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xf000f00000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6300000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0xf000f00000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x6300000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0xf000f00000000001; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x41); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000001; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x00000001; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000001; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x00000001; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x39); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffff0ffe04000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_w(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001fc0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000040004000100; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001fc0000; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffc00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffc00; +- __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffc00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; 
+- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xce7ffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xce7ffffffffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0x6300000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000002010; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; +- __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001fc0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000002010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001fbdff0; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xce7ffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xce7ffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x327f010101010102; +- *((unsigned long*)& __m256i_result[2]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x327f010101010102; +- *((unsigned long*)& __m256i_result[0]) = 0x6300000000000000; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x22); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,-11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7fc00000; +- *((int*)& __m128_result[2]) = 0x7fc00000; +- *((int*)& __m128_result[1]) = 0x7fc00000; +- *((int*)& __m128_result[0]) = 0x7fc00000; +- __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0x327f0101; +- *((int*)& __m256_op0[6]) = 0x01010102; +- *((int*)& __m256_op0[5]) = 0x63000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x327f0101; +- *((int*)& __m256_op0[2]) = 0x01010102; +- *((int*)& __m256_op0[1]) = 0x63000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xce7fffff; +- *((int*)& __m256_op1[6]) = 0xfffffffe; +- *((int*)& __m256_op1[5]) = 0x63000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xce7fffff; +- *((int*)& __m256_op1[2]) = 0xfffffffe; +- *((int*)& __m256_op1[1]) = 0x63000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x327f010101010102; +- *((unsigned long*)& __m256i_op0[2]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x327f010101010102; +- *((unsigned long*)& __m256i_op0[0]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff4; +- __m256i_out = __lasx_xvmini_d(__m256i_op0,-12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xce7ffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xce7ffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff39ffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff39ffffff; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x5e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128d_op1[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128d_op1[0]) = 0x0400040004000400; +- *((unsigned long*)& __m128d_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128d_result[0]) = 0x0400040004000400; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- 
__m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000040004000100; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128d_op1[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0x39ffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x39ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x7f800000; +- *((int*)& __m128_op0[2]) = 0x7f800000; +- *((int*)& __m128_op0[1]) = 0x7f800000; +- *((int*)& __m128_op0[0]) = 0x7f800000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x0400040004000400; +- unsigned_int_result = 0x0000000000000400; +- unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_hu(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x9cffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x9cffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x6300000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x6300000000000001; +- __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000001000000010; +- *((unsigned long*)& __m256d_op1[3]) = 0x45d5555545d55555; +- *((unsigned long*)& __m256d_op1[2]) = 0x74555555e8aaaaaa; +- *((unsigned long*)& __m256d_op1[1]) = 0x45d5555545d55555; +- *((unsigned long*)& __m256d_op1[0]) = 0x74555555e8aaaaaa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff39ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff39ffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; +- *((unsigned long*)& __m128i_result[0]) = 0x0404040404040404; +- __m128i_out = __lsx_vxori_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x5555555536aaaaac; +- *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_op0[1]) = 0x5555555536aaaaac; +- *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x5555555536aaaaac; +- *((unsigned long*)& __m256i_result[2]) = 0x55555555aaaaaaac; +- *((unsigned 
long*)& __m256i_result[1]) = 0x5555555536aaaaac; +- *((unsigned long*)& __m256i_result[0]) = 0x55555555aaaaaaac; +- __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffff39ffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffff39ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x5555555536aaaaac; +- *((unsigned long*)& __m256i_op1[2]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_op1[1]) = 0x5555555536aaaaac; +- *((unsigned long*)& __m256i_op1[0]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x6300000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x6300000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x5555555536aaaaac; +- *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_op0[1]) = 0x5555555536aaaaac; +- *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff39ffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff39ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x2b2b2b2b1bd5d5d6; +- *((unsigned long*)& __m256i_result[2]) = 0x2a2a2a2af2d5d5d6; +- *((unsigned long*)& __m256i_result[1]) = 0x2b2b2b2b1bd5d5d6; +- *((unsigned long*)& __m256i_result[0]) = 0x2a2a2a2af2d5d5d6; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000aaaa00008bfe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000aaaa0000aaaa; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000aaaa00008bfe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000aaaa0000aaaa; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000aaaa00008bfe; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000aaaa0000aaaa; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000aaaa00008bfe; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000aaaa0000aaaa; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000aaaa00008bfe; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000aaaa0000aaaa; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000aaaa00008bfe; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000aaaa0000aaaa; +- *((unsigned long*)& __m256d_result[3]) = 0x0000aaaa00008bfe; +- *((unsigned long*)& __m256d_result[2]) = 0x0000aaaa0000aaaa; +- *((unsigned long*)& __m256d_result[1]) = 0x0000aaaa00008bfe; +- *((unsigned long*)& __m256d_result[0]) = 0x0000aaaa0000aaaa; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2b2b2b2b1bd5d5d6; +- *((unsigned long*)& __m256i_op0[2]) = 0x2a2a2a2af2d5d5d6; +- *((unsigned long*)& __m256i_op0[1]) = 0x2b2b2b2b1bd5d5d6; +- *((unsigned long*)& __m256i_op0[0]) = 0x2a2a2a2af2d5d5d6; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002a0000002a; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002a0000002a; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff2ffffffd5; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffd5ffffffd6; +- __m256i_out = __lasx_vext2xv_w_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x55555555; +- *((int*)& __m256_op0[6]) = 0x36aaaaac; +- *((int*)& __m256_op0[5]) = 0x55555555; +- *((int*)& __m256_op0[4]) = 0xaaaaaaac; +- *((int*)& __m256_op0[3]) = 0x55555555; +- *((int*)& __m256_op0[2]) = 0x36aaaaac; +- *((int*)& __m256_op0[1]) = 0x55555555; +- *((int*)& __m256_op0[0]) = 0xaaaaaaac; +- *((unsigned long*)& __m256i_result[3]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_result[2]) = 0x5555555580000000; +- *((unsigned long*)& __m256i_result[1]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_result[0]) = 0x5555555580000000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000010; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000001fffe; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5555555580000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5555555580000000; +- int_result = 0x0000000055555555; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x5); +- *((int*)& __m128_op0[3]) = 0xc1bdceee; +- *((int*)& __m128_op0[2]) = 0x242070db; +- *((int*)& __m128_op0[1]) = 0xe8c7b756; +- *((int*)& __m128_op0[0]) = 0xd76aa478; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff800000000000; +- __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5555555580000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5555555580000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x555555553f800000; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x55555555; +- *((int*)& __m256_op0[6]) = 0x3f800000; +- *((int*)& __m256_op0[5]) = 0x55555555; +- *((int*)& __m256_op0[4]) = 0x80000000; +- *((int*)& __m256_op0[3]) = 0x55555555; +- *((int*)& __m256_op0[2]) = 0x3f800000; +- *((int*)& __m256_op0[1]) = 0x55555555; +- *((int*)& __m256_op0[0]) = 0x80000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x0001fffe; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x0001fffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000aaaa; +- *((int*)& __m256_op0[6]) = 0x00008bfe; +- *((int*)& __m256_op0[5]) = 0x0000aaaa; +- *((int*)& __m256_op0[4]) = 0x0000aaaa; +- *((int*)& __m256_op0[3]) = 0x0000aaaa; +- *((int*)& __m256_op0[2]) = 0x00008bfe; +- *((int*)& __m256_op0[1]) = 0x0000aaaa; +- *((int*)& __m256_op0[0]) = 0x0000aaaa; +- *((unsigned long*)& __m256d_result[3]) = 0x3795554000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x37917fc000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x3795554000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x37917fc000000000; +- __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff5556aaaa; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff5556aaaa; +- *((unsigned long*)& __m256i_op1[3]) = 0x0006ffff0004ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0006ffff0004ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0006ffff0004ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00020000aaa95556; +- *((unsigned long*)& __m256i_result[1]) = 0x0006ffff0004ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00020000aaa95556; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; +- __m128i_out = __lsx_vslli_h(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x2b2b2b2b1bd68080; +- *((unsigned long*)& __m256i_op1[2]) = 0x2a2ad4d4f2d8807e; +- *((unsigned long*)& __m256i_op1[1]) = 0x2b2b2b2b1bd68080; +- *((unsigned long*)& __m256i_op1[0]) = 0x2a2ad4d4f2d8807e; +- *((unsigned long*)& __m256i_result[3]) = 0xd4d5d4d5e42a7f80; +- *((unsigned long*)& __m256i_result[2]) = 0xd5d62b2c0d287f82; +- *((unsigned long*)& __m256i_result[1]) = 0xd4d5d4d5e42a7f80; +- *((unsigned long*)& __m256i_result[0]) = 0xd5d62b2c0d287f82; +- __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xfffffffc; +- *((int*)& __m256_op0[4]) = 0x5556aaa8; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xfffffffc; +- *((int*)& __m256_op0[0]) = 0x5556aaa8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0x00000055; +- *((int*)& __m256_op0[6]) = 0x36aaaaac; +- *((int*)& __m256_op0[5]) = 0x55555555; +- *((int*)& __m256_op0[4]) = 0xaaaaaaac; +- *((int*)& __m256_op0[3]) = 0x00000055; +- *((int*)& __m256_op0[2]) = 0x36aaaaac; +- *((int*)& __m256_op0[1]) = 0x55555555; +- *((int*)& __m256_op0[0]) = 0xaaaaaaac; +- *((int*)& __m256_op1[7]) = 0x00060000; +- *((int*)& __m256_op1[6]) = 0x00040000; +- *((int*)& __m256_op1[5]) = 0x00025555; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00060000; +- *((int*)& __m256_op1[2]) = 0x00040000; +- *((int*)& __m256_op1[1]) = 0x00025555; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffc5556aaa8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffc5556aaa8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000007070205; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000002020100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000007070205; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000002020100; +- __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002555500000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0002555500000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffdaaaaffffffff; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0002555500000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002555500000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_result[2]) = 0x0002555400000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_result[0]) = 0x0002555400000000; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000005536aaaaac; +- *((unsigned long*)& __m256d_op0[2]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000005536aaaaac; +- *((unsigned long*)& __m256d_op0[0]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0002555400000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0002555400000000; +- *((unsigned long*)& __m256d_result[3]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); +- 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000005554; +- *((unsigned long*)& __m256i_op1[2]) = 0xaaaa0000aaacfffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000005554; +- *((unsigned long*)& __m256i_op1[0]) = 0xaaaa0000aaacfffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000054; +- *((unsigned long*)& __m256i_result[2]) = 0x00aa000000ac00fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000054; +- *((unsigned long*)& __m256i_result[0]) = 0x00aa000000ac00fe; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000005536aaaaac; +- *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000005536aaaaac; +- *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000060102150101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000060102150101; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_w(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x6664666466646664; +- *((unsigned long*)& __m128i_result[0]) = 0x6664666466646664; +- __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x66); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000054; +- *((unsigned long*)& __m256i_op0[2]) = 0x00aa000000ac00fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000054; +- *((unsigned long*)& __m256i_op0[0]) = 0x00aa000000ac00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0002a80000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0002b0000003f800; +- *((unsigned long*)& __m256i_result[1]) = 0x0002a80000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0002b0000003f800; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,0); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000005536aaaaac; +- *((unsigned long*)& __m256i_op0[2]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000005536aaaaac; +- *((unsigned long*)& __m256i_op0[0]) = 0x55555555aaaaaaac; +- *((unsigned long*)& __m256i_result[3]) = 0x0000005136aaaaa8; +- *((unsigned long*)& __m256i_result[2]) = 0x55515551aaaaaaa8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000005136aaaaa8; +- *((unsigned long*)& __m256i_result[0]) = 0x55515551aaaaaaa8; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00aa000000ac00fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00aa000000ac00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0xff00000000000000; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128d_op0[0]) = 0xff00000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- 
*((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x3f2c678e38d1104c; +- *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff5556aaaa; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff5556aaaa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff9fffffffbffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffdaaaaffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfe7ffffffeffffc0; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfe7ffffffeffffc0; +- __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,4); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0002555500000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002555500000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1818181818181818; +- *((unsigned long*)& __m256i_result[2]) = 0x1818181818181818; +- *((unsigned long*)& __m256i_result[1]) = 0x1818181818181818; +- *((unsigned long*)& __m256i_result[0]) = 0x1818181818181818; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffe0000fffe0000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffe0000fffe0000; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002555500000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000080000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002555500000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x3b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0007000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0007000000000000; +- __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x40000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x40000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000060000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000060000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000ff00fe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00ff; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00ff; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe1; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x5980000000000000; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000016600000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000016600000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000016600000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000016600000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000016600000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000016600000000; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000060000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000060000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000060000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000060000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x0000fffe; +- *((int*)& __m128_op0[0]) = 0x0000ffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& 
__m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x5980000000000000; +- __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00060000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 
0x00000000; +- *((int*)& __m256_op0[2]) = 0x00060000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000166; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000166; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x00555555553f8000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00555555553f8000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001fffe; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x59800000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x59800000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x59800000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x59800000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x2c27000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x2c27000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00fe00ff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvmskltz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x5900000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x5900000000000000; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x59800000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x59800000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x0eb7aaaa; +- *((int*)& __m256_op1[6]) = 0xa6e6ac80; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; 
+- *((int*)& __m256_op1[3]) = 0x0eb7aaaa; +- *((int*)& __m256_op1[2]) = 0xa6e6ac80; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x555555553f800000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x555555553f800000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x353bb67af686ad9b; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x353bb67af686ad9b; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[2]) = 0x5982000200020002; +- *((unsigned long*)& __m256i_result[1]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[0]) = 0x5982000200020002; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x353bb67af686ad9b; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x353bb67af686ad9b; +- *((unsigned long*)& __m256i_op1[3]) = 0x0200000200000000; +- *((unsigned 
long*)& __m256i_op1[2]) = 0x2c27000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0200000200000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x2c27000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1cfd000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0200000200000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x2c27000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0200000200000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x2c27000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000400000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000400000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5980000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5980000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x41d6600000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x41d6600000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1cfd000000000000; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x59800000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x59800000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x41d66000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x41d66000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x41d6600000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x41d6600000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0x41d6600000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0x41d6600000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7fffffffffffffff; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x1cfd000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x56a09e662ab46b31; +- *((unsigned long*)& __m128i_op0[0]) = 0xb4b8122ef4054bb3; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x56a09e662ab46b31; +- *((unsigned long*)& __m128i_result[0]) = 0xb4b8122ef4054bb3; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000400000001; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000400000001; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x56a09e662ab46b31; +- *((unsigned long*)& __m128i_op1[0]) = 0xb4b8122ef4054bb3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x02b504f305a5c091; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[2]) = 0x6aeaeaeaeaeaeaea; +- *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[0]) = 0x6aeaeaeaeaeaeaea; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x02b504f305a5c091; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 
0x02b504f305a5c091; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000005602d2; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000000000000ac; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op1[2]) = 0x6aeaeaeaeaeaeaea; +- *((unsigned long*)& __m256i_op1[1]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op1[0]) = 0x6aeaeaeaeaeaeaea; +- *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x3c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0x0000ffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_op1[7]) = 0x0eb7aaaa; +- *((int*)& __m256_op1[6]) = 0xa6e6ac80; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x0eb7aaaa; +- *((int*)& __m256_op1[2]) = 0xa6e6ac80; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffff01ff01; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000101fd01fe; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000101fd01fe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128d_op0[0]) = 0x0001000101fd01fe; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0020000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0020000000000000; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x4b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x73); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000020; +- *((unsigned long*)& __m256d_op1[2]) = 0x0020000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000020; +- *((unsigned long*)& __m256d_op1[0]) = 0x0020000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffff01ff01; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0020000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0020000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffefefffffefe; +- __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff02; +- __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff02; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000101fd01fe; +- *((unsigned long*)& __m128i_result[1]) = 0xff80ff80ff80ff80; +- *((unsigned long*)& __m128i_result[0]) = 0xff80ff8080008000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000101fd01fe; +- *((unsigned long*)& __m128i_op1[1]) = 0xff80ff80ff80ff80; +- *((unsigned long*)& 
__m128i_op1[0]) = 0xff80ff8080008000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000101fd01fe; +- __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000001fe; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xffffffff; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001400000014; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001fe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128d_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x3c600000ff800000; +- *((unsigned long*)& __m128d_result[0]) = 0xfffffffffffffffe; +- __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000001400000014; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffffe; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0xffffff02; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0011001100110011; +- __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000014; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02; +- *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[0]) = 0x04000400fbfffb02; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vsrai_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x3c600000ff800000; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3c600000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0f180000ffe00000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0f180000ffe00000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x3c5fffffff7fffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffefffeff00feff; +- __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_h(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xff01ff01; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0xffffffff; +- *((int*)& __m128_op2[2]) = 0xffffffff; +- *((int*)& __m128_op2[1]) = 0xffffffff; +- *((int*)& __m128_op2[0]) = 0xff01ff01; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0xffffffff; +- *((int*)& __m128_result[0]) = 0x7f01ff01; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff01ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0x3c600000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xc39fffff007fffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00fd; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x7f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3c5fffffff7fffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffefffeff00feff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x03fc03fc03f803f8; +- *((unsigned long*)& __m256i_result[2]) = 0x03fc03fc03f803f8; +- *((unsigned long*)& __m256i_result[1]) = 0x03fc03fc03f803f8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000013ffffffec; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000013ffffebd8; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000013ffffffec; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000013ffffebd8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x03fc03fc03f803f8; +- *((unsigned long*)& __m256d_op0[2]) = 0x03fc03fc03f803f8; +- *((unsigned long*)& __m256d_op0[1]) = 0x03fc03fc03f803f8; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256d_result[2]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256d_result[1]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc39fffff007fffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000fe00fd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff0e700000000; +- __m128i_out = 
__lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc39fffff007fffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00fd; +- *((unsigned long*)& __m128i_result[1]) = 0x0e7ffffc01fffffc; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000003f803f4; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0e7ffffc01fffffc; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000003f803f4; +- *((unsigned long*)& __m128i_result[1]) = 0x1000000010000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100100000; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0e7ffffc01fffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000003f803f4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0e7ffffc01fffffc; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000003f803f4; +- *((unsigned long*)& __m128i_result[1]) = 0x0e7ffffc01fffffc; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001003f803f4; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256i_op1[2]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256i_op1[1]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256i_result[2]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256i_result[1]) = 0x7be2468acf15f39c; +- *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc39fffff007fffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000fe00fd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000; +- *((unsigned long*)& __m128i_result[1]) = 0x61cf003f0000007f; +- *((unsigned long*)& __m128i_result[0]) = 0x000000003c607f80; +- __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000013ffffffec; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000013ffffebd8; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000013ffffffec; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000013ffffebd8; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffec; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffebd8; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffec; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffebd8; +- __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff7f01ff01; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x36); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x85); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffec; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffebd8; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffec; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffebd8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffec; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffec; +- __m256i_out = __lasx_xvexth_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x78c00000ff000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x78c00000ff000000; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffec; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffec; +- *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100100000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff1; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff1; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff1; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000; +- int_op1 = 0x0000000000000400; +- *((unsigned long*)& __m128i_result[1]) = 0xff000000ff000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff000000ff000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff000000ff000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff000000ff000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000; +- __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000003ff000003ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000400; +- *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400; +- __m128i_out = __lsx_vreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x78c00000ff000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000078c00000; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffec; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffec; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010000000100000; +- *((unsigned long*)& __m128i_result[0]) = 0x0010000000100000; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0010000000100000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010000000100000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,-2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff7f01ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff7f01ff01; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffe03; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe03; +- __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000000d; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xfffffe03; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xfffffe03; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffefe00000000; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100100000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x2000000020000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200200000; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000b5207f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x2000000020000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200200000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x6a57a30ff0000000; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefefffffefe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefe00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x37); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x4f800000; +- __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000078c00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000078c00000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000078c00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000d; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xf7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000000; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000b5207f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x00000000b5207f80; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x00000000b5207f80; +- __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000000010000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000180100100000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000b5207f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00001801b5307f80; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000078c00000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x6a57a30ff0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000f0000000; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff01fffffffeff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff01fffffffeff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff01fffffffeff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff01fffffffeff; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- long_int_result = 0x0000000000000000; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000f0000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000b5207f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000b5207f80; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000400; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op0[0]) = 0x6a57a30ff0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x6a57a30ff0000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001801f0307f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00001801f0307f80; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000f0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x6a57a30ff0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe01fe01fe; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; +- __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_result[1]) = 0x000d000d000d000d; +- *((unsigned long*)& __m128i_result[0]) = 0x000d000d000d000d; +- __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000b5207f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000400; +- *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000d000d000d000d; +- *((unsigned long*)& __m128i_op0[0]) = 0x000d000d000d000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000680000006800; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x2d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe00000000; +- __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[0]) = 0x040004000400040d; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x040004000400040d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0501050105010501; +- *((unsigned long*)& __m128i_result[0]) = 0x050105010501050c; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,-2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[0]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x040004000400040d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[0]) = 0x040004000400040d; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffc0000fffc0000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffc0000fffc0000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffc0000fffc0000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffc0000fffc0000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x05010501; +- *((int*)& __m128_op1[2]) = 0x05010501; +- *((int*)& __m128_op1[1]) = 0x05010501; +- *((int*)& __m128_op1[0]) = 0x0501050c; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffc0000fffc0000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffc0000fffc0000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffc0000fffc0000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffc0000fffc0000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- long_op0 = 0x0000000000000400; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000400; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000400; +- __m128i_out = __lsx_vreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x6a57a30ff0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000400; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000400; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000400; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[1]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op0[1]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op1[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[0]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x1410141014101410; +- *((unsigned long*)& __m256i_result[2]) = 0x1410141014101410; +- *((unsigned long*)& __m256i_result[1]) = 0x1410141014101410; +- *((unsigned long*)& __m256i_result[0]) = 0x1410141014101410; +- __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_w(__m256i_op0); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000400; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000040d; +- __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000040d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000040d; +- __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x000000000000040d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000400; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000040d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xcc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000040d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitrev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006; +- __m128i_out = __lsx_vmini_d(__m128i_op0,6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x33); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000fff3; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000006; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21f32eafa486fd38; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x407c2ca3d3430357; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x21f32eaf5b7a02c8; +- *((unsigned long*)& __m128i_result[0]) = 0x407c2ca32cbd0357; +- __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00010001; +- *((int*)& __m256_op1[6]) = 0x00010001; +- *((int*)& __m256_op1[5]) = 0x00010001; +- *((int*)& __m256_op1[4]) = 0x00010001; +- *((int*)& __m256_op1[3]) = 0x00010001; +- *((int*)& __m256_op1[2]) = 0x00010001; +- *((int*)& __m256_op1[1]) = 0x00010001; +- *((int*)& __m256_op1[0]) = 0x00010001; +- *((int*)& __m256_result[7]) = 0x00010001; +- *((int*)& __m256_result[6]) = 0x00010001; +- *((int*)& __m256_result[5]) = 0x00010001; +- *((int*)& __m256_result[4]) = 0x00010001; +- *((int*)& __m256_result[3]) = 0x00010001; +- *((int*)& __m256_result[2]) = 0x00010001; +- *((int*)& __m256_result[1]) = 0x00010001; +- *((int*)& __m256_result[0]) = 0x00010001; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21f32eaf5b7a02c8; +- *((unsigned long*)& __m128i_op0[0]) = 0x407c2ca32cbd0357; +- *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_result[0]) = 0x203e16d116de012b; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fff3; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x000000000000040d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010400; +- __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 
0x00010400; +- *((int*)& __m128_op1[3]) = 0x10f917d7; +- *((int*)& __m128_op1[2]) = 0x2d3d01e4; +- *((int*)& __m128_op1[1]) = 0x203e16d1; +- *((int*)& __m128_op1[0]) = 0x16de012b; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x21f32eaf; +- *((int*)& __m128_op0[2]) = 0x5b7a02c8; +- *((int*)& __m128_op0[1]) = 0x407c2ca3; +- *((int*)& __m128_op0[0]) = 0x2cbd0357; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00010400; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b; +- *((unsigned long*)& __m128i_result[1]) = 0x887c8beb969e00f2; +- *((unsigned long*)& __m128i_result[0]) = 0x101f8b680b6f8095; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000040d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0008ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffff0008ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000017d7000001e4; +- *((unsigned long*)& __m128i_result[0]) = 0x000016d10000012b; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x887c8beb; +- *((int*)& __m128_op0[2]) = 0x969e00f2; +- *((int*)& __m128_op0[1]) = 0x101f8b68; +- *((int*)& __m128_op0[0]) = 0x0b6f8095; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b; +- *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_result[0]) = 0x203e16d116de012b; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000800080008000; +- __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfa31dfa21672e711; +- *((unsigned long*)& __m128i_op0[0]) = 0x1304db85e468073a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x80008000; +- *((int*)& __m256_op0[6]) = 0x80008000; +- *((int*)& __m256_op0[5]) = 0x80008000; +- *((int*)& __m256_op0[4]) = 0x80008000; +- *((int*)& __m256_op0[3]) = 0x80008000; +- *((int*)& __m256_op0[2]) = 0x80008000; +- *((int*)& __m256_op0[1]) = 0x80008000; +- *((int*)& __m256_op0[0]) = 0x80008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfa31dfa21672e711; +- *((unsigned long*)& __m128i_op1[0]) = 0x1304db85e468073a; +- *((unsigned long*)& __m128i_op2[1]) = 0x887c8beb969e00f2; +- *((unsigned long*)& __m128i_op2[0]) = 0x101f8b680b6f8095; +- *((unsigned long*)& __m128i_result[1]) = 0x7582ed22cb1c6e12; +- *((unsigned long*)& __m128i_result[0]) = 0x35aaa61c944f34c2; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_op0[0]) = 0x203e16d116de012b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_result[0]) = 0x203e16d116de012b; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fff; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0xfffffffff51cf8da; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffd6040188; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000101fffff8b68; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000b6fffff8095; +- *((unsigned long*)& __m128i_result[1]) = 0xfffff51cffffd604; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_w(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x0000101f; +- *((int*)& __m128_op0[2]) = 0xffff8b68; +- *((int*)& __m128_op0[1]) = 0x00000b6f; +- *((int*)& __m128_op0[0]) = 0xffff8095; +- *((int*)& __m128_op1[3]) = 0x10f917d7; +- *((int*)& __m128_op1[2]) = 0x2d3d01e4; +- *((int*)& __m128_op1[1]) = 0x203e16d1; +- *((int*)& __m128_op1[0]) = 0x16de012b; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x10f917d72d3d01e4; +- *((unsigned long*)& __m128i_op1[0]) = 0x203e16d116de012b; +- *((unsigned long*)& __m128i_result[1]) = 0x00f900d7003d00e4; +- *((unsigned long*)& __m128i_result[0]) = 0x003e00d100de002b; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; +- *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; +- *((unsigned long*)& __m128i_result[1]) = 0xf51cf8dad6040188; +- *((unsigned long*)& __m128i_result[0]) = 0x0982eadaf234ed87; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00f900d7003d00e4; +- *((unsigned long*)& __m128i_op1[0]) = 0x003e00d100de002b; +- *((unsigned long*)& __m128i_result[1]) = 0x7f4000007f040000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f0200007f020000; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000101fffff8b68; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000b6fffff8095; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000b6fffff8095; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff51cf8da; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xffffffffd6040188; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff01018888; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x50); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; +- *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff51cf8da; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffd6040188; +- *((unsigned long*)& __m128i_result[1]) = 0x00020002000d0000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000020f2300ee; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00020002000d0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000020f2300ee; +- *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7f4000007f040000; +- *((unsigned long*)& __m128d_op0[0]) = 0x7f0200007f020000; +- *((unsigned long*)& __m128d_op1[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128d_result[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffff01018888; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& 
__m128i_op2[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x7f400000; +- *((int*)& __m128_op0[2]) = 0x7f040000; +- *((int*)& __m128_op0[1]) = 0x7f020000; +- *((int*)& __m128_op0[0]) = 0x7f020000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0x0014002c; +- *((int*)& __m128_op1[1]) = 0xfffefffe; +- *((int*)& __m128_op1[0]) = 0x003b0013; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0x3ea5016b; +- *((int*)& __m128_result[1]) = 0xfffefffe; +- *((int*)& __m128_result[0]) = 0x3f6fb04d; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff01018888; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3ea5016b; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffefffe3f6fb04d; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000d96f; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffd83b; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000002aaad555; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000002aaad555; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x00007fff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff00000000; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010108082626; +- *((unsigned long*)& __m128i_result[0]) = 0x01010101ffff7878; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8dada; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000145ad; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000300003e6e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff8f8da00; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00ffff00; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x73); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xf51cf8dad6040188; +- *((unsigned long*)& __m128i_op1[0]) = 0x0982e2daf234ed87; +- *((unsigned long*)& __m128i_result[1]) = 0xf51cf8dad6040188; +- *((unsigned long*)& __m128i_result[0]) = 0x0982e2daf234ed87; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007fff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007fff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_result[2]) = 0x0202810102020202; +- *((unsigned long*)& __m256i_result[1]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_result[0]) = 0x0202810102020202; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfffffffff8f8da00; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffff01018888; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000003ea5016c; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffefefd3f7027c5; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; +- *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0ae3072529fbfe78; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0202020202020202; +- *((unsigned long*)& __m256d_op0[2]) = 0x0202810102020202; +- *((unsigned long*)& __m256d_op0[1]) = 0x0202020202020202; +- *((unsigned long*)& __m256d_op0[0]) = 0x0202810102020202; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x00007fff00000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x00007fff00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x00007fff00000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x00007fff00000000; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_op1[2]) = 0x0202810102020202; +- *((unsigned long*)& __m256i_op1[1]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_op1[0]) 
= 0x0202810102020202; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; +- __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000003f00000000; 
+- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000003f00000000; +- __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf51cf8dad6040188; +- *((unsigned long*)& __m128i_op0[0]) = 0x0982e2daf234ed87; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xf51df8dbd6050189; +- *((unsigned long*)& __m128i_result[0]) = 0x0983e2dbf235ed87; +- __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000003f00000000; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xf51df8db; +- *((int*)& __m128_op0[2]) = 0xd6050189; +- *((int*)& __m128_op0[1]) = 0x0983e2db; +- *((int*)& __m128_op0[0]) = 0xf235ed87; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0x3ea5016b; +- *((int*)& __m128_op1[1]) = 0xfffefffe; +- *((int*)& __m128_op1[0]) = 0x3f6fb04d; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x4000400000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000040004000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffff00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00ffff00; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffe000000f6; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe000000f6; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x01010101ffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0x01010101000000f6; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_op1[2]) = 0x0202810102020202; +- *((unsigned long*)& __m256i_op1[1]) = 0x0202020202020202; +- *((unsigned long*)& __m256i_op1[0]) = 0x0202810102020202; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fefe0000fefe; +- *((unsigned long*)& __m256i_result[2]) = 0x00007fff0000fefe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fefe0000fefe; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff0000fefe; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe00fe00fe00fd01; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe00fffefe0100f6; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0100010000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0100010000010000; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000003f00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000003f0000; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0100010000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0100010000010000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffff0000010000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfe00fe00fe00fd01; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe00fffefe0100f6; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff0001ffffff0a; +- __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff700000009; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; +- int_op1 = 0x0000000000000400; +- *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[2]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[0]) = 0x003f003f003f003f; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000003f0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000003f0; +- __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f003f; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[2]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[0]) = 0x003f003f003f003f; +- __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffff7; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d5d55; +- __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x5d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op2[2]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op2[1]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op2[0]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000017e; +- __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000008; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x8f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01; +- *((unsigned long*)& __m128i_result[1]) = 0xfc01fd13fc02fe0c; +- *((unsigned long*)& __m128i_result[0]) = 0xfe00fd14fe01fd16; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000003f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000003f0; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x30); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd13fc02fe0c; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd14fe01fd16; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff0000010000; +- *((unsigned long*)& __m128i_result[1]) = 0xfc01fd1300000001; +- *((unsigned long*)& __m128i_result[0]) = 0xfe00fd1400010000; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000001f; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000001f; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff0001ffffff0a; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100000101; +- *((unsigned long*)& __m128i_result[0]) = 0x000100ff010101f6; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000003f0000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd1300000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd1400010000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01; +- 
*((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d5d55; +- *((unsigned long*)& __m128i_result[1]) = 0xfc01fcfefc02fdf7; +- *((unsigned long*)& __m128i_result[0]) = 0xfe00fcfffe21fd01; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfc01fcfefc02fdf7; +- *((unsigned long*)& __m128d_op0[0]) = 0xfe00fcfffe01fd01; +- *((unsigned long*)& __m128d_op1[1]) = 0xfc01fd1300000001; +- *((unsigned long*)& __m128d_op1[0]) = 0xfe00fd1400010000; +- *((unsigned long*)& __m128d_op2[1]) = 0xfc01fcfefc02fdf7; +- *((unsigned long*)& __m128d_op2[0]) = 0xfe00fcfffe01fd01; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffff0000010000; +- *((unsigned long*)& __m128i_op1[1]) = 0xabff54f1ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xa5f7458b000802ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fff7fc01; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x5d5d5d5d5d5d5d55; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d005d5d5d55; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000017e; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000005e02; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000005e02; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000005e02; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000005e02; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffffff700000009; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffffff700000009; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xabff54e911f71b07; +- *((unsigned long*)& __m128i_op0[0]) = 0xa9ec4882f216ea11; +- *((unsigned long*)& __m128i_op1[1]) = 0xfc01fcfefc02fdf7; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe00fcfffe01fd01; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xaa0051e90ff91808; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfc01fd1300000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe00fd1400010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff7fc01; +- *((unsigned long*)& __m128i_result[1]) = 0xfe00fe8980000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff007e8a7ffc7e00; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& 
__m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x01ff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x01ff000000000000; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x80000000fff7fc01; +- __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 
0x0101010101010101; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000003effe1; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000003effe1; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000003effe1; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000003effe1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000005e02; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000005e02; +- *((unsigned long*)& __m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_result[2]) = 0xc2c2c2c2c2c29cc0; +- *((unsigned long*)& __m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_result[0]) = 0xc2c2c2c2c2c29cc0; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0xc2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000005e02; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000005e02; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c29cc0; +- *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& 
__m256i_op0[0]) = 0xc2c2c2c2c2c29cc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfc01fcfefc02fdf7; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe00fcfffe01fd01; +- *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000; +- *((unsigned long*)& __m128i_result[1]) = 0xc72ef153fc02fdf7; +- *((unsigned long*)& __m128i_result[0]) = 0xca31bf15fd010000; +- __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfc01fd1300000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe00fd1400010000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f0000007f000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8080000180800100; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff7fc01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x80000000fff6fc00; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x80000000fff6fc00; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000080000000; +- __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x80000000fff6fc00; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f0000007f000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080000180800100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ff00ffff; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffc01; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffc01; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256d_op0[2]) = 0xc2c2c2c2c2c29cc0; +- *((unsigned long*)& __m256d_op0[1]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256d_op0[0]) = 0xc2c2c2c2c2c29cc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op2[2]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op2[1]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op2[0]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128d_op1[1]) = 0xb55ccf30f52a6a68; +- *((unsigned long*)& __m128d_op1[0]) = 0x4e0018eceb82c53a; +- *((unsigned long*)& __m128d_result[1]) = 0x355ccf30f52a6a68; +- *((unsigned long*)& __m128d_result[0]) = 0xce0018eceb82c53a; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff000000fefb0000; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002; +- *((unsigned long*)& __m128i_op1[1]) = 0x82c53a0000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc72ef153fc02fdf7; +- *((unsigned long*)& __m128i_result[1]) = 0x007d00c500ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0038000e0003ff03; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefff6fff80002; +- *((unsigned long*)& __m128i_op1[1]) = 0x82c53a0000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc72ef153fc02fdf7; +- *((unsigned long*)& __m128i_result[1]) = 0x82c539ffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xc72df14afbfafdf9; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1716151416151413; +- *((unsigned long*)& __m128i_op0[0]) = 
0x1514131214131211; +- *((unsigned long*)& __m128i_result[1]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m128i_result[0]) = 0xfff3fff3fff3fff3; +- __m128i_out = __lsx_vmini_h(__m128i_op0,-13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_result[2]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_result[1]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_result[0]) = 0xe161616161614e60; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000aaaaaaaa; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000aaab555b; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000aaaaaaaa; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000aaab555b; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007d00c50177ac5b; +- *((unsigned long*)& __m128i_op0[0]) = 0xac82aa88a972a36a; +- *((unsigned long*)& __m128i_result[1]) = 0x000000c5ac01015b; +- *((unsigned long*)& __m128i_result[0]) = 
0xaaacac88a3a9a96a; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x7c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslli_h(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x82c539ffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7d3ac60000000000; +- __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op1[1]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000061; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000061; +- __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffff800fffff800; +- *((unsigned long*)& __m256i_result[2]) = 0xfffff800fffff800; +- *((unsigned long*)& __m256i_result[1]) = 0xfffff800fffff800; +- *((unsigned long*)& __m256i_result[0]) = 0xfffff800fffff800; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x82c539ffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xc72df14afbfafdf9; +- *((unsigned long*)& __m128i_op1[1]) = 0x82c539ffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff7fc01; +- *((unsigned long*)& __m128i_op1[1]) = 0x82c539ffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xc72df14afbfafdf9; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x23); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fbf83468; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fbf83468; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161; +- *((unsigned long*)& 
__m256i_op0[0]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7d3ac60000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007d3ac600; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xfffefff6; +- *((int*)& __m128_op0[0]) = 0xfff80002; +- *((int*)& __m128_op1[3]) = 0x000000c5; +- *((int*)& __m128_op1[2]) = 0xac01015b; +- *((int*)& __m128_op1[1]) = 0xaaacac88; +- *((int*)& __m128_op1[0]) = 0xa3a9a96a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op0[1]) = 0xe161616161616161; +- *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60; +- *((unsigned long*)& __m256d_result[3]) = 0xc1be9e9e9f000000; +- *((unsigned long*)& __m256d_result[2]) = 0x41d8585858400000; +- *((unsigned long*)& __m256d_result[1]) = 0xc1be9e9e9f000000; +- *((unsigned long*)& __m256d_result[0]) = 0x41d8585858400000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xe161616161614e60; +- *((unsigned long*)& __m256d_op0[2]) = 0xe161616161614e60; +- *((unsigned long*)& __m256d_op0[1]) = 0xe161616161614e60; +- *((unsigned long*)& __m256d_op0[0]) = 0xe161616161614e60; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007d3ac600; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_b(__m128i_op0,0x7); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffefff6fff80002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x08fdc221bfdb1927; +- *((unsigned long*)& __m128i_op0[0]) = 0x4303c67e9b7fb213; +- *((unsigned long*)& __m128i_op1[1]) = 0x08fdc221bfdb1927; +- *((unsigned long*)& __m128i_op1[0]) = 0x4303c67e9b7fb213; +- *((unsigned long*)& __m128i_result[1]) = 0x00100184017e0032; +- *((unsigned long*)& __m128i_result[0]) = 0x0086018c01360164; +- __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x8000800080008000; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000c5ac01015b; +- *((unsigned long*)& __m128i_op1[0]) = 0xaaacac88a3a9a96a; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; +- __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00100184017e0032; +- *((unsigned long*)& __m128i_op0[0]) = 0x0086018c01360164; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffff33c4b1e67; +- *((unsigned long*)& __m128i_result[1]) = 0x0000800c0004300c; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x08fdc221; +- *((int*)& __m128_op0[2]) = 0xbfdb1927; +- *((int*)& __m128_op0[1]) = 0x4303c67e; +- *((int*)& __m128_op0[0]) = 0x9b7fb213; +- *((int*)& __m128_op1[3]) = 0x0000800c; +- *((int*)& __m128_op1[2]) = 0x0004300c; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,-15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x009500b10113009c; +- *((unsigned long*)& __m128i_op0[0]) = 0x009500b10113009c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000005d5d; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xc1be9e9e9f000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x41d8585858400000; +- *((unsigned long*)& __m256d_op0[1]) = 0xc1be9e9e9f000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x41d8585858400000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000005d5d; +- *((unsigned long*)& __m128d_op1[1]) = 0x08fdc221bfdb1927; +- *((unsigned long*)& __m128d_op1[0]) = 0x4303c67e9b7fb213; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffefff6fff80002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xc1be9e9e9f000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x41d8585858400000; +- *((unsigned long*)& __m256d_op0[1]) = 0xc1be9e9e9f000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x41d8585858400000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc1be9e9e9f000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x41d8585858400000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc1be9e9e9f000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x41d8585858400000; +- *((unsigned long*)& __m256i_result[3]) = 0x1076000016160000; +- *((unsigned long*)& __m256i_result[2]) = 0x1610000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1076000016160000; +- *((unsigned long*)& __m256i_result[0]) = 0x1610000000000000; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000000000000; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x31); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000005d5d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000005d5d; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x014200c200c200ae; +- *((unsigned long*)& __m256i_op0[2]) = 0x014200c200c200ae; +- *((unsigned long*)& __m256i_op0[1]) = 0x014200c200c200ae; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x014200c200c200ae; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,-4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op0[2]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op0[1]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op0[0]) = 0xe161616161614e60; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_result[2]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_result[1]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_result[0]) = 0xe161616161614f61; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000005d5d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x41); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0002000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0002000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op1[0]) = 0x00ff00ff00ff00fe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0xffffffff; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, 
__m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_op1[1]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_result[3]) = 0x0000616100004f61; +- *((unsigned long*)& __m256i_result[2]) = 0x0000616100004f61; +- *((unsigned long*)& __m256i_result[1]) = 0x0000616100004f61; +- *((unsigned long*)& __m256i_result[0]) = 0x0000616100004f61; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op1[3]) = 0x1086658a18ba3594; +- *((unsigned long*)& __m256i_op1[2]) = 0x160fe9f000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1086658a18ba3594; +- *((unsigned long*)& __m256i_op1[0]) = 0x160fe9f000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x07a232640bfc1a73; +- *((unsigned long*)& __m256i_result[2]) = 0x0a66f497ff9effa9; +- *((unsigned long*)& __m256i_result[1]) = 0x07a232640bfc1a73; +- *((unsigned long*)& __m256i_result[0]) = 0x0a66f497ff9effa9; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_result[3]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_result[2]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_result[1]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_result[0]) = 0xffc0ffc0ffc0ffc0; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1086658a18ba3594; +- *((unsigned long*)& __m256i_op0[2]) = 0x160fe9f000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1086658a18ba3594; +- *((unsigned long*)& __m256i_op0[0]) = 0x160fe9f000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_op1[2]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_op1[1]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_op1[0]) = 0xe161616161614f61; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000616100004f61; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000616100004f61; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000616100004f61; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000616100004f61; +- *((unsigned long*)& __m256i_result[3]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_result[2]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_result[1]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_result[0]) = 0x4df5b1a3ed5e02c1; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 
0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op2[2]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op2[1]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op2[0]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; +- __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000c5ac01015b; +- *((unsigned long*)& __m128i_op0[0]) = 0xaaacac88a3a9a96a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op0[2]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op0[0]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ffffff1e9e9e9e; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff9e9eb09e; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ffffff1e9e9e9e; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff9e9eb09e; +- *((unsigned long*)& __m256i_result[3]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_result[2]) = 0xffc00000ffc0ffc0; +- *((unsigned long*)& __m256i_result[1]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_result[0]) = 0xffc00000ffc0ffc0; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256d_op1[2]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256d_op1[1]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256d_op1[0]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; +- *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[2]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[1]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op0[0]) = 0xfebdff3eff3dff52; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op0[2]) = 0xffc00000ffc0ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op0[0]) = 0xffc00000ffc0ffc0; +- *((unsigned long*)& __m256i_result[3]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& 
__m256i_result[2]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_result[1]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_result[0]) = 0xfff90000fff9fff9; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; +- __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff0004ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff0004ff; +- __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000e13; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000e13; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op0[0]) 
= 0x0000001300000013; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00ffffff1e9e9e9e; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffff9e9eb09e; +- *((unsigned long*)& __m256d_op0[1]) = 0x00ffffff1e9e9e9e; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff9e9eb09e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x66); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
+- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfrint_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op0[2]) = 0xffc00000ffc0ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m256i_op0[0]) = 0xffc00000ffc0ffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffcfee0fe00ffe0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffcfee0fe00ffe0; +- __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000001300000013; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ffffffffff; +- __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_b(__m128i_op0,-12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_result[3]) = 0x00000001fff9fff8; +- *((unsigned long*)& __m256i_result[2]) = 0x00000001fff9fff8; +- *((unsigned long*)& __m256i_result[1]) = 0x00000001fff9fff8; +- *((unsigned long*)& __m256i_result[0]) = 0x00000001fff9fff8; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op1[3]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_op1[2]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_op1[1]) = 0x108659e46485f7e1; +- *((unsigned long*)& __m256i_op1[0]) = 0x4df5b1a3ed5e02c1; +- *((unsigned long*)& __m256i_result[3]) = 0x081abb9d36ee1037; +- *((unsigned long*)& __m256i_result[2]) = 0x1617eb17129bfd38; +- *((unsigned long*)& __m256i_result[1]) = 0x081abb9d36ee1037; +- *((unsigned long*)& __m256i_result[0]) = 0x1617eb17129bfd38; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x80000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000001fff9fff8; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fff9fff8; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000001fff9fff8; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001fff9fff8; +- *((unsigned long*)& __m256i_op1[3]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op1[2]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op1[1]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op1[0]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffcfee0fe00ffe0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffcfee0fe00ffe0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fffc0000fee0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fe000000ffe0; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0x0080001300000013; +- *((unsigned long*)& __m128i_op0[0]) = 0x0080001300000013; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0080001300000013; +- *((unsigned long*)& __m128i_result[0]) = 
0x0080001300000013; +- __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001300000013; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffff900000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffff900000003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x3f3f3f3900000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x3f3f3f3900000003; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffc0000fee0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fe000000ffe0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff900000003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7ffe00007f000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; +- *((unsigned long*)& __m128i_result[0]) = 0x0303030303030303; +- __m128i_out = __lsx_vpcnt_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0052005200520052; +- *((unsigned long*)& __m128i_result[0]) = 0x0052005200520052; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffff900000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffff900000003; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; +- __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffff0000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xffff0000; +- *((int*)& __m256_op1[7]) = 0xfffefffe; +- *((int*)& __m256_op1[6]) = 0xfffefffe; +- *((int*)& __m256_op1[5]) = 0xfffefffe; +- *((int*)& __m256_op1[4]) = 0xfffefffe; +- *((int*)& __m256_op1[3]) = 0xfffefffe; +- *((int*)& __m256_op1[2]) = 0xfffefffe; +- *((int*)& __m256_op1[1]) = 0xfffefffe; +- *((int*)& __m256_op1[0]) = 0xfffefffe; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0xffff0000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0xffff0000; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ffe00007f000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x1616161616161616; +- *((unsigned long*)& __m256i_result[2]) = 0x161616167fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7ffe16167f161616; +- *((unsigned long*)& __m256i_result[0]) = 0x161616167fffffff; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256d_op1[2]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256d_op1[1]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256d_op1[0]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000800000000; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) 
= 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff90000fff9fff9; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffff000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_result[1]) = 0xffff7fff00007f00; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000100007fff; +- __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xcd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x79); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ffe00007f000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_result[1]) = 0x017e00ff017e00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff017e01fe; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7fff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f007f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f7f7fff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfff807f; +- *((unsigned long*)& __m256i_result[1]) = 0xbf803fbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfff807f; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_result[0]) = 0x5252525252525252; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00; +- __m256i_out = __lasx_xvmskgez_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3a2a3a2a3a2a3a2a; +- *((unsigned long*)& __m256i_op0[2]) = 0x3a2a3a2a3aaa45aa; +- *((unsigned long*)& __m256i_op0[1]) = 0x3a553f7f7a2a3a2a; +- *((unsigned long*)& __m256i_op0[0]) = 0x3a2a3a2a3aaa45aa; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_result[3]) = 0x1d949d949d949d95; +- *((unsigned long*)& __m256i_result[2]) = 0x1d949d949e1423d4; +- *((unsigned long*)& __m256i_result[1]) = 0x1de9a03f3dd41d95; +- *((unsigned long*)& __m256i_result[0]) = 0x1d949d949e1423d4; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x00000001fffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x000000003fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000003fffffff; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x1f3d2101; +- *((int*)& __m256_op0[6]) = 0x1f3d2101; +- *((int*)& __m256_op0[5]) = 0x1f3d2101; +- *((int*)& __m256_op0[4]) = 0xd07dbf01; +- *((int*)& __m256_op0[3]) = 0x9f1fd080; +- *((int*)& __m256_op0[2]) = 0x1f3d2101; +- *((int*)& __m256_op0[1]) = 0x1f3d2101; +- *((int*)& __m256_op0[0]) = 0xd07dbf01; +- *((int*)& __m256_op1[7]) = 0x1d949d94; +- *((int*)& __m256_op1[6]) = 0x9d949d95; +- *((int*)& __m256_op1[5]) = 0x1d949d94; +- *((int*)& __m256_op1[4]) = 0x9e1423d4; +- *((int*)& __m256_op1[3]) = 0x1de9a03f; +- *((int*)& __m256_op1[2]) = 0x3dd41d95; +- *((int*)& __m256_op1[1]) = 0x1d949d94; +- *((int*)& __m256_op1[0]) = 0x9e1423d4; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x8001b72e; +- *((int*)& __m256_result[6]) = 0x0001b72e; +- *((int*)& __m256_result[5]) = 0x8001b72e; +- *((int*)& __m256_result[4]) = 0xaf12d5f0; +- *((int*)& __m256_result[3]) = 0x00024763; +- *((int*)& __m256_result[2]) = 0x9d9cb530; +- *((int*)& __m256_result[1]) = 0x8001b72e; +- *((int*)& __m256_result[0]) = 0xaf12d5f0; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0xfffefffe; +- *((int*)& __m256_op0[6]) = 0xfffefffe; +- *((int*)& __m256_op0[5]) = 0xfffefffe; +- *((int*)& __m256_op0[4]) = 0xfffefffe; +- *((int*)& __m256_op0[3]) = 0xfffefffe; +- *((int*)& __m256_op0[2]) = 0xfffefffe; +- *((int*)& __m256_op0[1]) = 0xfffefffe; +- *((int*)& __m256_op0[0]) = 0xfffefffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x017e017e01dd61de; +- *((unsigned long*)& __m256d_op0[2]) = 0x5d637d043bc4fc43; +- *((unsigned long*)& __m256d_op0[1]) = 0x01dcc2dce31bc35d; +- *((unsigned 
long*)& __m256d_op0[0]) = 0x5e041d245b85fc43; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x5d637d043bc4fc43; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x5e041d245b85fc43; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x017e00ff017e00ff; 
+- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op1[3]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op1[2]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op1[1]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op1[0]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_result[3]) = 0x1f9d9f9d1f9db29f; +- *((unsigned long*)& __m256i_result[2]) = 0x1f9d9f9d201cb39e; +- *((unsigned long*)& __m256i_result[1]) = 0x201c9f9d201cb29f; +- *((unsigned long*)& __m256i_result[0]) = 0x1f9d9f9d201cb39e; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1616161616161616; +- *((unsigned long*)& __m256i_op0[2]) = 0x161616167fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ffe16167f161616; +- *((unsigned long*)& __m256i_op0[0]) = 0x161616167fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x2c2c2c2c2c2c2c2c; +- *((unsigned long*)& __m256i_result[2]) = 0x2c2c2c2cfefefefe; +- *((unsigned long*)& __m256i_result[1]) = 0xfefc2c2cfe2c2c2c; +- *((unsigned long*)& __m256i_result[0]) = 0x2c2c2c2cfefefefe; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1616161616161616; +- *((unsigned long*)& __m256i_op0[2]) = 0x161616167fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ffe16167f161616; +- *((unsigned long*)& __m256i_op0[0]) = 0x161616167fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xc7c7c7c7c7c7c7c7; +- *((unsigned long*)& __m256i_result[2]) = 0xc7c7c7c7ae2e2e2e; +- *((unsigned long*)& __m256i_result[1]) = 0xae2fc7c7aec7c7c7; +- *((unsigned long*)& __m256i_result[0]) = 0xc7c7c7c7ae2e2e2e; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0xd1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1f9d9f9d1f9db29f; +- *((unsigned long*)& __m256i_op0[2]) = 0x1f9d9f9d201cb39e; +- *((unsigned long*)& __m256i_op0[1]) = 0x201c9f9d201cb29f; +- *((unsigned long*)& __m256i_op0[0]) = 0x1f9d9f9d201cb39e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007773; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000003373; +- __m256i_out = __lasx_xvmskltz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& 
__m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; +- __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dffbfff00000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0200400000000001; +- unsigned_int_result = 0x0000000000000001; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007773; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003373; +- *((unsigned long*)& __m256i_result[3]) = 0xbbbbbbbbbbbbbbbb; +- *((unsigned long*)& __m256i_result[2]) = 0xbbbbbbbbbbbb8888; +- *((unsigned long*)& __m256i_result[1]) = 0xbbbbbbbbbbbbbbbb; +- *((unsigned long*)& __m256i_result[0]) = 0xbbbbbbbbbbbb8888; +- __m256i_out = 
__lasx_xvnori_b(__m256i_op0,0x44); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000007773; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000003373; +- *((unsigned long*)& __m256d_op1[3]) = 0x1616161616161616; +- *((unsigned long*)& __m256d_op1[2]) = 0x161616167fffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x7ffe16167f161616; +- *((unsigned long*)& __m256d_op1[0]) = 0x161616167fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000100000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x2c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007773; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003373; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0800000008000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0800000008000000; +- __m128i_out = __lsx_vrotri_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_du(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000100000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x000100010001fffe; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffffff; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x1); +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff2fffffff2; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff2fffffff2; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff2fffffff2; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff2fffffff2; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000020002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000020002; +- __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op0[2]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op0[1]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op0[0]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_result[3]) = 0x0703030307030203; +- *((unsigned long*)& __m256i_result[2]) = 0x0703030307030203; +- *((unsigned long*)& __m256i_result[1]) = 0x0703030307030203; +- *((unsigned long*)& __m256i_result[0]) = 0x0703030307030203; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffffff; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x003fffffff000000; +- __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000020002; +- *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; +- *((unsigned long*)& __m128i_result[0]) = 0x0303030303030303; +- __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00005555aaabfffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003fffffff000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ab; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0003000300030003; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000700020005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0003000300030003; +- *((unsigned long*)& __m128i_result[0]) = 0x0003000700020005; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- 
*((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000100000000; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0003000300030003; +- *((unsigned long*)& __m128d_op0[0]) = 0x0003000700020005; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfrint_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000017e007ffe02; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000100010001fffd; +- __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000008000000080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x000100010001fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf6f6f6f6f6f6f6f6; +- *((unsigned long*)& __m256i_result[2]) = 0xf6f6f6f6f6f6f6f6; +- *((unsigned long*)& __m256i_result[1]) = 0xf6f6f6f6f6f6f6f6; +- *((unsigned long*)& __m256i_result[0]) = 0xf6f6f6f6f6f6f6f6; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) 
= 0x00ff00ff; +- *((int*)& __m256_op0[6]) = 0x00ff00ff; +- *((int*)& __m256_op0[5]) = 0x00ff00ff; +- *((int*)& __m256_op0[4]) = 0x017e01fe; +- *((int*)& __m256_op0[3]) = 0x017e00ff; +- *((int*)& __m256_op0[2]) = 0x017e00ff; +- *((int*)& __m256_op0[1]) = 0x00ff00ff; +- *((int*)& __m256_op0[0]) = 0x017e01fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fe8001b72e0001; +- *((unsigned long*)& __m256i_op0[2]) = 0xb72e8001b72eaf12; +- *((unsigned long*)& __m256i_op0[1]) = 0x01fe000247639d9c; +- *((unsigned long*)& __m256i_op0[0]) = 0xb5308001b72eaf12; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_result[2]) = 0xffb7ff80ffd0ffd8; +- *((unsigned long*)& __m256i_result[1]) = 0x00010000002fff9e; +- *((unsigned long*)& __m256i_result[0]) = 0xffb5ff80ffd0ffd8; +- __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x38); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vexth_du_wu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8001b72e0001b72e; +- *((unsigned long*)& __m256i_op0[2]) = 0x8001b72eaf12d5f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x000247639d9cb530; +- *((unsigned long*)& __m256i_op0[0]) = 0x8001b72eaf12d5f0; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned 
long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_result[3]) = 0xff81ffe50001ffe5; +- *((unsigned long*)& __m256i_result[2]) = 0xff81ffe5ffa6ffc6; +- *((unsigned long*)& __m256i_result[1]) = 0x000200aafe9affe5; +- *((unsigned long*)& __m256i_result[0]) = 0xff81ffe5ffa6ffc6; +- __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x017e00ff017e00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x01fe8001b72e0001; +- *((unsigned long*)& __m256i_op1[2]) = 0xb72e8001b72eaf12; +- *((unsigned long*)& __m256i_op1[1]) = 0x01fe000247639d9c; +- *((unsigned long*)& __m256i_op1[0]) = 0xb5308001b72eaf12; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff017e00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x017e00ff017e01fe; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff017e00ff; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e01fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffff000100000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vmod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000100000000; +- __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; +- *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff017e00ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x017e00ff017e01fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff017e00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xb70012c4b714fc1e; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff017e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fe02b71c199d; +- *((unsigned long*)& __m256i_result[0]) = 0x017e017e00ff017e; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fe8001b72e0001; +- *((unsigned long*)& __m256i_op0[2]) = 0xb72e8001b72eaf12; +- *((unsigned long*)& __m256i_op0[1]) = 0x01fe000247639d9c; +- *((unsigned long*)& __m256i_op0[0]) = 0xb5308001b72eaf12; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x26); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00007fffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00007fffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000100010001fffd; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000100010; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000100010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000100010; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xff00ff00ff00ff00; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((int*)& __m128_op2[3]) = 0x00307028; +- *((int*)& __m128_op2[2]) = 0x003f80b0; +- *((int*)& __m128_op2[1]) = 0x0040007f; +- *((int*)& __m128_op2[0]) = 0xff800000; +- *((int*)& __m128_result[3]) = 0x80307028; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0x8040007f; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; +- __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xb70036db; +- *((int*)& __m256_op1[6]) = 0x12c4007e; +- *((int*)& __m256_op1[5]) = 0xb7146213; +- *((int*)& __m256_op1[4]) = 0xfc1e0049; +- *((int*)& __m256_op1[3]) = 0x000000fe; +- *((int*)& __m256_op1[2]) = 0xfe02fffe; +- *((int*)& __m256_op1[1]) = 0xb71c413b; +- *((int*)& __m256_op1[0]) = 0x199d04b5; +- *((int*)& __m256_op2[7]) = 0xb70036db; +- *((int*)& __m256_op2[6]) = 0x12c4007e; +- *((int*)& __m256_op2[5]) = 0xb7146213; +- *((int*)& __m256_op2[4]) = 0xfc1e0049; +- *((int*)& __m256_op2[3]) = 0x000000fe; +- *((int*)& __m256_op2[2]) = 0xfe02fffe; +- *((int*)& __m256_op2[1]) = 0xb71c413b; +- *((int*)& __m256_op2[0]) = 0x199d04b5; +- *((int*)& __m256_result[7]) = 0x370036db; +- *((int*)& __m256_result[6]) = 0x92c4007e; +- *((int*)& __m256_result[5]) = 0x37146213; +- *((int*)& __m256_result[4]) = 0x7c1e0049; +- *((int*)& __m256_result[3]) = 0x800000fe; +- *((int*)& __m256_result[2]) = 0x7e02fffe; +- *((int*)& __m256_result[1]) = 0x371c413b; +- *((int*)& __m256_result[0]) = 0x999d04b5; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- 
ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0x80307028; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x8040007f; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; +- *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; +- *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0048007f002f0028; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x004a007f002f0028; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0049ffd2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; +- *((unsigned long*)& __m256i_op0[0]) = 0x01620133004b0032; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; +- *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; +- *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; +- *((unsigned long*)& __m256i_result[3]) = 0xc080ffff0049ffd2; +- *((unsigned long*)& __m256i_result[2]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fffeffb9ff9d; +- *((unsigned long*)& __m256i_result[0]) = 0x00010000002fff9e; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000020302030; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000020302030; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e; +- *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000fefe02fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x43); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000020302030; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000020302030; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000100010; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000100010; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0049ffd2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00630064004bffd0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x80307028ffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x8040007fffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00307028003f80b0; +- *((unsigned long*)& __m128i_op0[0]) = 0x0040007fff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000003f80b0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000; +- __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00007f8000007f80; +- *((unsigned long*)& __m128i_op1[0]) = 
0x00007f8000007f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000003f80b0; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; +- *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff00ff00ffff00; +- *((unsigned long*)& __m256i_result[2]) = 0xff000000ff00ff00; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffff00ffff; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000ff00ff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xb327b9363c99d32e; +- *((unsigned long*)& __m128i_op0[0]) = 0xa1e7b475d925730f; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000003f80b0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_op2[1]) = 0x00007f8000007f80; +- *((unsigned long*)& __m128i_op2[0]) = 0x00007f8000007f80; +- *((unsigned long*)& __m128i_result[1]) = 0xb327b9363c992b2e; +- *((unsigned long*)& __m128i_result[0]) = 0xa1e7b475d925730f; +- __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8001b72e0001b72e; +- *((unsigned long*)& __m256i_op0[2]) = 0x8001b72eaf12d5f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x000247639d9cb530; +- *((unsigned long*)& __m256i_op0[0]) = 0x8001b72eaf12d5f0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe056fd9d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffceba70; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000003f80b0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0xb327b9363c992b2e; +- *((unsigned long*)& __m128i_op1[0]) = 0xa1e7b475d925730f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000001ff00; +- __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x370036db92c4007e; +- *((unsigned long*)& __m256i_op0[2]) = 0x371462137c1e0049; +- *((unsigned long*)& __m256i_op0[1]) = 0x800000fe7e02fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x371c413b999d04b5; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffb7ff80ffd0ffd8; +- *((unsigned long*)& __m256i_op1[1]) = 0x00010000002fff9e; +- *((unsigned long*)& __m256i_op1[0]) = 0xffb5ff80ffd0ffd8; +- *((unsigned long*)& __m256i_op2[3]) = 0xffff00ff00ffff00; +- *((unsigned long*)& __m256i_op2[2]) = 0xff000000ff00ff00; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffff00ffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xff00000000ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x37fe365b920d007e; +- *((unsigned long*)& __m256i_result[2]) = 0x381462137d1e0149; +- *((unsigned long*)& __m256i_result[1]) = 0x80ff00fe7e020060; +- *((unsigned long*)& __m256i_result[0]) = 0x381c413b99cd04dd; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xb327b9363c992b2e; +- *((unsigned long*)& __m128i_op1[0]) = 0xa1e7b475d925730f; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff3c992b2e; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0xa6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x80307028ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x8040007fffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0101ff010101; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff00ff00ffff00; +- *((unsigned long*)& __m256i_op1[2]) = 0xff000000ff00ff00; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffff00ffff; +- 
*((unsigned long*)& __m256i_op1[0]) = 0xff00000000ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000180000000; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe5; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00307028003f80b0; +- *((unsigned long*)& __m128i_op0[0]) = 0x0040007fff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffc0ffffff81; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff008000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0060e050007f0160; +- *((unsigned long*)& __m128i_result[0]) = 0x0040007fff800000; +- __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007f8000007f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007f8000007f80; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000003fc; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000003fc; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; +- *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffff81ffffeb2f; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f6ee0570b4e; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000018de; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffb4ffcec0f1; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffff81ffffeb2f; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003f6ee0570b4e; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000018de; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffb4ffcec0f1; +- *((unsigned long*)& __m256i_result[3]) = 0x00000001ffffeab0; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000e0574abc; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000018de; +- *((unsigned 
long*)& __m256i_result[0]) = 0x00000001ffcec0a5; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0060e050007f0160; +- *((unsigned long*)& __m128i_op1[0]) = 0x0040007fff800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc080ffff0049ffd2; +- *((unsigned long*)& __m256i_op0[2]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffeffb9ff9d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00010000002fff9e; +- *((int*)& __m256_result[7]) = 0x34000000; +- *((int*)& __m256_result[6]) = 0xfff00000; +- *((int*)& __m256_result[5]) = 0xfff6e000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x33800000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x363c0000; +- *((int*)& __m256_result[0]) = 0xfff3c000; +- __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003fc; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000003fc; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0x3c992b2e; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffff730f; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101017f0101017f; +- __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffb7ff80ffd0ffd8; +- *((unsigned long*)& __m256i_op0[1]) = 0x00010000002fff9e; +- *((unsigned long*)& __m256i_op0[0]) = 0xffb5ff80ffd0ffd8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_result[2]) = 0xffb7ff80ffd0ffd8; +- *((unsigned long*)& __m256i_result[1]) = 0x00010000002fff9e; +- *((unsigned long*)& __m256i_result[0]) = 0xffb5ff80ffd0ffd8; +- __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000180000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xc080ffff0049ffd2; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002ff80ffb70000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000fffeffb9ff9d; +- *((unsigned long*)& __m256i_op1[0]) = 0x00010000002fff9e; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffd2; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ff8000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000080000000; +- __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x34000000fff00000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff6e00000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3380000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x363c0000fff3c000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffb7146213; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffc1e0049; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffb71c413b; +- *((unsigned long*)& __m256i_op1[0]) = 0xf3317da580000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x34000000fff00000; +- *((unsigned long*)& __m256i_result[2]) = 0xfff6e00000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x3380000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x363c0000fff3c000; +- __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x34000000fff00000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff6e00000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3380000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x363c0000fff3c000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000030000000c; +- 
*((unsigned long*)& __m256i_result[2]) = 0x0000001100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000500000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800000010; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1268f057137a0267; +- *((unsigned long*)& __m128i_op0[0]) = 0x0048137ef886fae0; +- *((unsigned long*)& __m128i_result[1]) = 0xff000000ff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0xff00ff0000000000; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff946c; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff946b; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3c992b2e; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff730f; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffff946c; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffff946b; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff946c; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffdffff946c; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1268f057137a0267; +- *((unsigned long*)& __m128i_op0[0]) = 0x0048137ef886fae0; +- *((unsigned long*)& __m128i_result[1]) = 0x000000490000004d; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffffe2; +- __m128i_out = __lsx_vsrai_w(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000000f3; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000f3; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e; +- *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000fefe02fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5; +- *((unsigned long*)& __m256i_op2[3]) = 0xb70036db12c4007e; +- *((unsigned long*)& __m256i_op2[2]) = 0xb7146213fc1e0049; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000fefe02fffe; +- *((unsigned long*)& 
__m256i_op2[0]) = 0xb71c413b199d04b5; +- *((unsigned long*)& __m256i_result[3]) = 0xd100645944100004; +- *((unsigned long*)& __m256i_result[2]) = 0xd1908469108400d1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000404040104; +- *((unsigned long*)& __m256i_result[0]) = 0xd1108199714910f9; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff3c992b2e; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff730f; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff3c992b2e; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff730f; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x34000000fff00000; +- *((unsigned long*)& __m256d_op0[2]) = 0xfff6e00000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x3380000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x363c0000fff3c000; +- *((unsigned long*)& __m256d_op1[3]) = 0x000000030000000c; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000001100000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000500000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000800000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff000000ff00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00ff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0100000001000100; +- *((unsigned long*)& __m128i_result[0]) = 0x0100010000000000; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xd100645944100004; +- *((unsigned long*)& __m256i_op0[2]) = 0xd1908469108400d1; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000404040104; +- *((unsigned long*)& __m256i_op0[0]) = 0xd1108199714910f9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000004040104; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffd1108199; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000714910f9; +- __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff732a; +- *((unsigned long*)& __m128i_result[1]) = 0x807f7fff807f807f; +- *((unsigned long*)& __m128i_result[0]) = 0x807f807f7fff3995; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff; +- long_int_result = 0x00000001ffffffff; +- long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256d_op0[2]) = 
0x0000000004040104; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffd1108199; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000714910f9; +- *((unsigned long*)& __m256d_op1[3]) = 0x000000030000000c; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000001100000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000500000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000800000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffe5; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff2; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff2; +- __m128i_out = __lsx_vavgr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xff000000ff00ff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xff00ff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000049ffffff4d; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff01ffffffff; +- __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000001faea9ec; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000003; +- *((int*)& __m256_op1[6]) = 0x0000000c; +- *((int*)& __m256_op1[5]) = 
0x00000011; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000005; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000008; +- *((int*)& __m256_op1[0]) = 0x00000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000004040104; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffd1108199; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000714910f9; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffd10000006459; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000441000000004; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000040400000104; +- *((unsigned long*)& __m256i_result[3]) = 0xffffd10000000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffd1108199; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000104; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffe5; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffe5; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& 
__m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff732a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0100000001000100; +- *((unsigned long*)& __m128d_op0[0]) = 0x0100010000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000490000004d; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ffffffffff; +- __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x1268f057137a0267; +- *((unsigned long*)& __m128i_op1[0]) = 0x0048137ef886fae0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000006; +- __m128i_out = __lsx_vmini_d(__m128i_op0,6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffffff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ffffffffff; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000002a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000003a; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000015; +- __m128i_out = __lsx_vavgr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffd10000006459; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000441000000004; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000040400000104; +- *((unsigned long*)& __m256i_result[3]) = 0x0f0f0f0f0f0f6459; +- *((unsigned long*)& __m256i_result[2]) = 0x0f0f44100f0f0f0f; +- *((unsigned long*)& __m256i_result[1]) = 0x0f0f0f0f0f0f0f0f; +- *((unsigned long*)& __m256i_result[0]) = 0x0f0f0f0f0f0f0f0f; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100010000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ffffff00ff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010001000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff00ff00ffffff; +- __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000049; +- *((int*)& __m128_op0[2]) = 0x0000004d; +- *((int*)& __m128_op0[1]) = 0x00000001; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000001; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000001; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x80000001; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000490000004d; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000490000004d; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffff9; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000006; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000490000004d; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000073; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000002a; +- *((unsigned long*)& __m128i_result[1]) = 0x00000049000000c0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffff29; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x00ffffff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffff7f00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff007f0101017f; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000006; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffd10000006459; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000441000000004; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000040400000104; +- *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f; +- *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000007fff01ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xdb8e209d0cce025a; +- *((unsigned long*)& __m256i_result[3]) = 0x88888a6d0962002e; +- *((unsigned long*)& __m256i_result[2]) = 
0xdb8a3109fe0f0020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000007fff01fffb; +- *((unsigned long*)& __m256i_result[0]) = 0xdb8e20990cce025a; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x88); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000002a; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ffffff00ff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdb801b6d0962003f; +- *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0024; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000007fff01ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xdb8e209d0cce025a; +- *((unsigned long*)& __m256i_op1[3]) = 0xb70036db12c4007e; +- *((unsigned long*)& __m256i_op1[2]) = 0xb7146213fc1e0049; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000fefe02fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xb71c413b199d04b5; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffcc8000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000007dfdff4b; +- *((unsigned long*)& __m256i_result[3]) = 0xdb801b6d0962003f; +- *((unsigned long*)& __m256i_result[2]) = 0xdb8a3109fe0f0024; +- *((unsigned long*)& __m256i_result[1]) = 0x9a7f997fff01ffff; +- *((unsigned long*)& __m256i_result[0]) = 0xbe632a4f1c3c5653; +- __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xb70036db12c4007e; +- *((unsigned long*)& __m256i_op0[2]) = 0xb7146213fc1e0049; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000fefe02fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xb71c413b199d04b5; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00b7003600120000; +- *((unsigned long*)& __m256i_result[2]) = 0x00b7006200fc0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00b7004100190004; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00b7003600120000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00b7006200fc0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000fe00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00b7004100190004; +- *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f; +- *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024; +- *((unsigned long*)& __m256i_op1[1]) = 0x9a7f997fff01ffff; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xbe632a4f1c3c5653; +- *((unsigned long*)& __m256i_result[3]) = 0xffffe54affffffd3; +- *((unsigned long*)& __m256i_result[2]) = 0xffffcfae000000d8; +- *((unsigned long*)& __m256i_result[1]) = 0x00006681000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffd668ffffa9c6; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffcc8000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007dfdff4b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x003ffff300000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000001f7f7f; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000015; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29; +- *((unsigned long*)& __m128i_result[1]) = 0x00000049000000c0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff29; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000049000000c0; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffff29; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000100000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x00000000000000c0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000020000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000183fffffe5; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ffff7f00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff007f0101017f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000020000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000183fffffe5; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000073; +- *((unsigned long*)& __m128i_op2[0]) = 0x000000000000002a; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffff7f00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff007f0101017f; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000073; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000010000002b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000400000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007dfdff4b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff3400000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff83ff01; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff01ff3400000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff83ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000183fffffe5; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000400000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000400000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_result[2]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_result[1]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_result[0]) = 0xbabababababababa; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0xba); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x88888a6d0962002e; +- *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000007fff01fffb; +- *((unsigned long*)& __m256i_op0[0]) = 0xdb8e20990cce025a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xff01ff3400000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff83ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0962002efe0f0020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff01fffb8667012d; +- __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_op1[1]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xbabababababababa; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000c0; +- *((unsigned long*)& __m128i_op0[0]) = 
0x00000001ffffff29; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000000000c0; +- *((unsigned long*)& __m128i_op2[0]) = 0x00000001ffffff29; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff2900000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdb801b6d0962003f; +- *((unsigned long*)& __m256i_op0[2]) = 0xdb8a3109fe0f0024; +- *((unsigned long*)& __m256i_op0[1]) = 0x9a7f997fff01ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xbe632a4f1c3c5653; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xbabababababababa; +- *((unsigned long*)& __m256d_op0[2]) = 0xbabababababababa; +- *((unsigned long*)& __m256d_op0[1]) = 0xbabababababababa; +- *((unsigned long*)& __m256d_op0[0]) = 0xbabababababababa; +- *((unsigned long*)& __m256d_op1[3]) = 0x88888a6d0962002e; +- *((unsigned long*)& __m256d_op1[2]) = 0xdb8a3109fe0f0020; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000007fff01fffb; +- *((unsigned long*)& __m256d_op1[0]) = 0xdb8e20990cce025a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000400000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff2900000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000401000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff2900000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xa41aa42e; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xa41aa42e; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffcc80; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x7dfdff4b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000005be55bd2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000401000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000800; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000800; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000800; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000800; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000401000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000401000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0080200000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000401000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000080000000000; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xdb801b6d0962003f; +- *((unsigned long*)& __m256i_op1[2]) = 0xdb8a3109fe0f0024; +- *((unsigned long*)& __m256i_op1[1]) = 0x9a7f997fff01ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xbe632a4f1c3c5653; +- *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_result[2]) = 0x2475cef801f0ffdd; +- *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_result[0]) = 0x419cd5b11c3c5654; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000401000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_hu(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; +- __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_op1[2]) = 0x2475cef801f0ffdd; +- *((unsigned long*)& __m256i_op1[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_op1[0]) = 0x419cd5b11c3c5654; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffcc8000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000007dfdff4b; +- *((unsigned long*)& 
__m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xbabababababababa; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xbabababababababa; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000005be55bd2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007dfdff4b; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_op0[2]) = 0x2475cef801f0ffdd; +- *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_op0[0]) = 0x419cd5b11c3c5654; +- *((unsigned long*)& __m256i_op1[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_op1[2]) = 0x2475cef801f0ffdd; +- *((unsigned long*)& __m256i_op1[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_op1[0]) = 0x419cd5b11c3c5654; 
+- *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_result[2]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_result[0]) = 0x6580668200fe0002; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff6; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_op0[2]) = 0x2475cef801f0ffdd; +- *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_op0[0]) = 0x419cd5b11c3c5654; +- *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_result[2]) = 0x2475cef801f0ffdd; +- *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_result[0]) = 0x419cd5b11c3c5654; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffcc8000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff82037dfd0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; +- __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xbf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256d_op1[1]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff6; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvrepl128vei_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_op0[2]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_op0[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_op0[0]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_result[2]) = 0x247fe49409620040; +- *((unsigned long*)& __m256i_result[1]) = 0x6580668200fe0002; +- *((unsigned long*)& __m256i_result[0]) = 0x6580668200fe0002; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x3f3f3f3f3f3f3f3f; +- *((unsigned long*)& __m256i_result[2]) = 0x3f3f3f3f3f3f3f3f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000003f3f3f3f; +- *((unsigned long*)& __m256i_result[0]) = 0x3f3f3f3f00000000; +- __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffff6; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffff6; +- *((unsigned long*)& __m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f; +- *((unsigned long*)& __m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000003f3f3f3f; +- *((unsigned long*)& __m256i_op2[0]) = 0x3f3f3f3f00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000003f3f3f3c; +- *((unsigned long*)& __m256i_result[2]) = 0xc6c6c6c68787878a; +- *((unsigned long*)& __m256i_result[1]) = 0x000000003f3f3f3c; +- *((unsigned long*)& __m256i_result[0]) = 0x8787878a00000000; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0019081900190019; +- *((unsigned long*)& __m128i_result[0]) = 0x0019081900190019; +- __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& 
__m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffff0000; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f7f7f7f0000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- int_op1 = 0x00000000000000ac; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xe17cec8fe08008ac; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xe0801f41e0800168; +- *((unsigned long*)& __m256i_op1[3]) = 0x9240f24a84b18025; +- *((unsigned long*)& __m256i_op1[2]) = 0x9240f24a84b18025; +- *((unsigned long*)& __m256i_op1[1]) = 0xb2c0b341807f8006; +- *((unsigned long*)& __m256i_op1[0]) = 0xb2c0b341807f8006; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000012481e4950; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001658166830; +- __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x5b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x00007f7f7f7f0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xf6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000c0; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000c0; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c0; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00190819; +- *((int*)& __m128_op1[2]) = 0x00190019; +- *((int*)& __m128_op1[1]) = 0x00190819; +- *((int*)& __m128_op1[0]) = 0x00190019; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000c0; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000c0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000c0; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000c0; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000012481e4950; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000001658166830; +- *((unsigned long*)& __m256i_result[3]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; +- __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; +- __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000080; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000080; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fff7fff; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandi_b(__m128i_op0,0x39); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x3f3f3f3c; +- *((int*)& __m256_op0[5]) = 0xc6c6c6c6; +- *((int*)& __m256_op0[4]) = 0x8787878a; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x3f3f3f3c; +- *((int*)& __m256_op0[1]) = 
0x8787878a; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0xffff9c9d00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfff7fff7fff7fff7; +- *((unsigned long*)& __m256i_result[2]) = 0xfff7fff7fff7fff7; +- *((unsigned long*)& __m256i_result[1]) = 0xfff7fff7fff7fff7; +- *((unsigned long*)& __m256i_result[0]) = 0xfff7fff7fff7fff7; +- __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x9240f24a84b18025; +- *((unsigned long*)& __m256i_op0[2]) = 0x9240f24a84b18025; +- *((unsigned long*)& __m256i_op0[1]) = 0xb2c0b341807f8006; +- *((unsigned long*)& __m256i_op0[0]) = 0xb2c0b341807f8006; +- *((unsigned long*)& __m256i_result[3]) = 0x009200f200840080; +- *((unsigned long*)& __m256i_result[2]) = 0x009200f200840080; +- *((unsigned long*)& __m256i_result[1]) = 0x00b200b300800080; +- *((unsigned long*)& __m256i_result[0]) = 0x00b200b300800080; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000003f3f3f3c; +- *((unsigned long*)& __m256i_op0[2]) = 0xc6c6c6c68787878a; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000003f3f3f3c; +- *((unsigned long*)& __m256i_op0[0]) = 0x8787878a00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffff800; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; +- __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m128d_op1[1]) = 0xfffffffffffff800; +- *((unsigned long*)& __m128d_op1[0]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffff800; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffff6; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffff6; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000003f3f3f3c; +- *((unsigned long*)& __m256i_op2[2]) = 0xc6c6c6c68787878a; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000003f3f3f3c; +- *((unsigned long*)& __m256i_op2[0]) = 0x8787878a00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe3; +- *((unsigned long*)& __m256i_result[2]) = 0x63636344c3c3c4f6; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffc3; +- *((unsigned long*)& __m256i_result[0]) = 0xc3c3c500fffffff6; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x009200f200840080; +- *((unsigned long*)& __m256i_op0[2]) = 0x009200f200840080; +- *((unsigned long*)& __m256i_op0[1]) = 0x00b200b300800080; +- *((unsigned long*)& __m256i_op0[0]) = 0x00b200b300800080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x009200f200840080; +- *((unsigned long*)& 
__m256i_result[2]) = 0x009200f200840080; +- *((unsigned long*)& __m256i_result[1]) = 0x00b200b300800080; +- *((unsigned long*)& __m256i_result[0]) = 0x00b200b300800080; +- __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000003f3f3f3c; +- *((unsigned long*)& __m256i_op1[2]) = 0xc6c6c6c68787878a; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000003f3f3f3c; +- *((unsigned long*)& __m256i_op1[0]) = 0x8787878a00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003f3fc6c68787; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00003f3f87870000; +- __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffff0000; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00003f3f; +- *((int*)& __m256_op1[4]) = 0xc6c68787; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00003f3f; +- *((int*)& __m256_op1[0]) = 0x87870000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffff0000; +- *((int*)& __m128_op0[1]) = 0x00ff0000; +- *((int*)& __m128_op0[0]) = 0x00ff0000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000800; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0xffffffff; +- *((int*)& __m128_op2[2]) = 0xfffff800; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xfffff800; +- *((int*)& __m128_result[1]) = 0x80000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffe15; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe15; +- __m128i_out = __lsx_vldi(3605); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x9240000000008025; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffff24affff8025; +- *((unsigned long*)& __m256i_op0[1]) = 0xb2c0000000008006; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffb341ffff8006; +- *((unsigned long*)& __m256i_op1[3]) = 0x9240000000008025; +- *((unsigned long*)& 
__m256i_op1[2]) = 0xfffff24affff8025; +- *((unsigned long*)& __m256i_op1[1]) = 0xb2c0000000008006; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffb341ffff8006; +- *((unsigned long*)& __m256i_result[3]) = 0xff2400000000ff00; +- *((unsigned long*)& __m256i_result[2]) = 0xfffeffe4fffeff00; +- *((unsigned long*)& __m256i_result[1]) = 0xff6400000000ff00; +- *((unsigned long*)& __m256i_result[0]) = 0xfffeff66fffeff00; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffe15; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffe15; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_w(__m128i_op0,1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x009200f200840080; +- *((unsigned long*)& __m256i_op0[2]) = 0x009200f200840080; +- *((unsigned long*)& __m256i_op0[1]) = 0x00b200b300800080; +- *((unsigned long*)& __m256i_op0[0]) = 0x00b200b300800080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffc0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffc0000000000000; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x83); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000e00000080; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000e00000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000e00000080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000e00000080; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xff240000; +- *((int*)& __m256_op0[6]) = 0x0000ff00; +- *((int*)& __m256_op0[5]) = 0xfffeffe4; +- *((int*)& __m256_op0[4]) = 0xfffeff00; +- *((int*)& __m256_op0[3]) = 0xff640000; +- *((int*)& __m256_op0[2]) = 0x0000ff00; +- *((int*)& __m256_op0[1]) = 0xfffeff66; +- *((int*)& __m256_op0[0]) = 0xfffeff00; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff0000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000080; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe; +- __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffff800; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x001fffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x4b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 
0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f3fc6c68787; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f87870000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003f3fc6c68787; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00003f3f87870000; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f3fc6c68787; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f87870000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op2[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op2[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op2[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_result[2]) = 0x00003e3ec6c68686; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffeff; +- *((unsigned long*)& __m256i_result[0]) = 0x00003e3e87870000; +- __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xff2400000000ff00; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffeffe4fffeff00; +- *((unsigned long*)& __m256i_op1[1]) = 0xff6400000000ff00; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffeff66fffeff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = 
__lasx_xvssran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0080808080808080; +- *((unsigned long*)& __m256i_result[2]) = 0x0080808080808080; +- *((unsigned long*)& __m256i_result[1]) = 0x0080808100808080; +- *((unsigned long*)& __m256i_result[0]) = 0x0080808000808080; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfffffffffffff800; +- *((unsigned long*)& __m128d_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000080; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01fe04; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01fe04; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; 
+- __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x01010101; +- *((int*)& __m128_op0[2]) = 0x01010101; +- *((int*)& __m128_op0[1]) = 0x01010101; +- *((int*)& __m128_op0[0]) = 0x01010101; +- *((int*)& __m128_result[3]) = 0xc2fa0000; +- *((int*)& __m128_result[2]) = 0xc2fa0000; +- *((int*)& __m128_result[1]) = 0xc2fa0000; +- *((int*)& __m128_result[0]) = 0xc2fa0000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x21); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; +- *((unsigned long*)& __m256i_result[3]) = 0x000000edff00fffd; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fff10000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000cdff00fffd; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ff320000ffff; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x47000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x01010101010000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vclz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffff800; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffef800; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080807; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080807; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffef; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffef; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; +- __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x5f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x01010101010000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffef; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffef; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0100feff0100eeef; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000001010; +- *((unsigned long*)& __m256i_result[1]) = 0x0100feff00feef11; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000001010; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffef; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffef; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0404ffff00000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0404040800000010; +- __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffefffe; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xfffefffe; +- *((int*)& __m256_op0[2]) = 0xfffefffd; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffdfffffffe0; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffdfffffffe0; +- __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000100da000100fd; +- *((unsigned long*)& __m256d_op0[2]) = 0x0001ffe20001fefd; +- *((unsigned long*)& __m256d_op0[1]) = 0x0001009a000100fd; +- *((unsigned long*)& __m256d_op0[0]) = 0x0001ff640001fefd; +- *((unsigned long*)& __m256i_result[3]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000c2f90000bafa; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000002020000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000201eff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000002020000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001fef010; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000000000; +- __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffff800; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0002000400000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020006; +- unsigned_int_result = 0x0000000000020006; +- unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x0); +- *((unsigned long*)& __m256d_op0[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- 
__m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x01010101010000ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x8080808280808082; +- *((unsigned long*)& __m256i_result[2]) = 0x8080808280808082; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808280808080; +- *((unsigned long*)& __m256i_result[0]) = 0x8080808280808082; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000c2f90000bafa; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000fffff800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x80808082; +- *((int*)& __m256_op0[6]) = 0x80808082; +- *((int*)& __m256_op0[5]) = 0x80808082; +- *((int*)& __m256_op0[4]) = 0x80808082; +- *((int*)& __m256_op0[3]) = 0x80808082; +- *((int*)& __m256_op0[2]) = 0x80808080; +- *((int*)& __m256_op0[1]) = 0x80808082; +- *((int*)& __m256_op0[0]) = 0x80808082; +- *((int*)& __m256_op1[7]) = 0x55555555; +- *((int*)& __m256_op1[6]) = 0x55555555; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x55555555; +- *((int*)& __m256_op1[2]) = 0x55555555; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_h(__m256i_op0,14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000100da000100fd; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001ffe20001fefd; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001009a000100fd; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001ff640001fefd; +- *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007ff90000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000001ff60000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x8080808280808082; +- *((unsigned long*)& __m256d_op0[2]) = 0x8080808280808082; +- *((unsigned long*)& __m256d_op0[1]) = 0x8080808280808080; +- *((unsigned long*)& __m256d_op0[0]) = 0x8080808280808082; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000; +- __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000c2f90000bafa; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000c2f90000bafa; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000c2fa8000c2fa; +- *((unsigned long*)& __m128i_result[1]) = 0xffff3d06ffff4506; +- *((unsigned long*)& __m128i_result[0]) = 0x7ffffffe7ffff800; +- __m128i_out = 
__lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000100da000100fd; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001ffe20001fefd; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001009a000100fd; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001ff640001fefd; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffff3d06ffff4506; +- *((unsigned long*)& __m128d_op0[0]) = 0x7ffffffe7ffff800; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffd; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffff800; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fffff800; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffff800; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x8a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff3d06ffff4506; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe7ffff800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xc2f9bafac2fac2fa; +- __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x7ff90000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x1ff60000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xfffffffe; +- *((int*)& __m256_op1[4]) = 0x00000001; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xfffffffe; +- *((int*)& __m256_op1[0]) = 0x00000001; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000001; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000001; +- __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, 
__m256_result, __m256_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xfffebd06fffe820c; +- *((unsigned long*)& __m128d_op1[0]) = 0x7fff7ffe7fff3506; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256i_result[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc2f9bafac2fac2fa; +- *((unsigned long*)& __m128i_op1[1]) = 0xbdf077eee7e20468; +- *((unsigned long*)& __m128i_op1[0]) = 0xe3b1cc6953e7db29; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000e7e20468; +- *((unsigned long*)& __m128i_result[0]) = 0xc2fac2fa53e7db29; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256d_op0[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256d_op0[0]) = 0xff874dc687870000; +- *((unsigned long*)& __m256d_result[3]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff8001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; +- long_int_result = 0x1f0fdf7f3e3b31d4; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000e7e20468; +- *((unsigned long*)& __m128i_op0[0]) = 0xc2fac2fa53e7db29; +- *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; +- *((unsigned long*)& __m128i_result[0]) = 0x00a6ffceffb60052; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffebd06fffe820c; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7ffe7fff3506; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffebd06fffe820c; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7ffe7fff3506; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff0cffffff18; +- *((unsigned long*)& __m128i_result[0]) = 0xfefffefffeff6a0c; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; +- *((unsigned long*)& __m128i_op0[0]) = 0x00a6ffceffb60052; +- unsigned_int_result = 0x0000000000000084; +- unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xa); +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff0cffffff18; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefffefffeff6a0c; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc2f9bafac2fac2fa; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffefefe6a; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op2[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op2[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x61f1000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0108000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x61f1a18100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0108000000000000; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fdf000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fdf000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fdf7fff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fdf7fff00000000; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x35); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; +- *((unsigned long*)& __m128i_result[1]) = 0x7474f6fd7474fefe; +- *((unsigned long*)& __m128i_result[0]) = 0xf474f6fef474f6fe; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x74); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7474f6fd7474fefe; +- *((unsigned long*)& __m128d_op0[0]) = 0xf474f6fef474f6fe; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x01fc03e000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x01fc03e000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; +- *((unsigned long*)& __m128i_op0[0]) = 0x00a6ffceffb60052; +- *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffefe; +- *((unsigned long*)& __m128i_result[0]) = 
0x00000000ffffc2ba; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff84fff4; +- *((int*)& __m128_op0[2]) = 0xff84fff4; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x41dfffc000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x41dfffdfffc00000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffff0c8000c212; +- *((unsigned long*)& __m128d_op0[0]) = 0xfefffeff7f002d06; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc2f9bafac2fac2fa; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fc03e000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x01fc03e000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00fffb0402fddf20; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00fffb0402fddf20; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001fbf9fbe29f52; +- *((unsigned long*)& __m256i_result[2]) = 0x5b409c0000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001fbf9fbe29f52; +- *((unsigned long*)& __m256i_result[0]) = 0x5b409c0000000000; +- __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f7e3f; +- *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; +- *((unsigned long*)& __m256i_op1[3]) = 0x41dfffc000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x41dfffdfffc00000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0001fbf9fbe29f52; +- 
*((unsigned long*)& __m256i_op2[2]) = 0x5b409c0000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0001fbf9fbe29f52; +- *((unsigned long*)& __m256i_op2[0]) = 0x5b409c0000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfbba01c0003f7e3f; +- *((unsigned long*)& __m256i_result[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_result[1]) = 0xfbd884e7003f7e3f; +- *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff84fff4ff84fff4; +- *((unsigned long*)& __m128i_op1[0]) = 0x00a6ffceffb60052; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xff84fff4ff84fff4; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfbba01c0003f7e3f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_op1[1]) = 0xfbd884e7003f7e3f; +- *((unsigned long*)& __m256i_op1[0]) = 0xff874dc687870000; +- *((unsigned long*)& __m256i_result[3]) = 0xfbba01c0003f7e3f; +- *((unsigned long*)& __m256i_result[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_result[1]) = 0xfbd884e7003f7e3f; +- *((unsigned long*)& __m256i_result[0]) = 0xff874dc687870000; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00fffb04; +- *((int*)& __m256_op0[6]) = 0x02fddf20; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00fffb04; +- *((int*)& __m256_op0[2]) = 0x02fddf20; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x41dfffc0; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x41dfffdf; +- *((int*)& __m256_op1[2]) = 0xffc00000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff84fff4ff84fff4; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_op1[1]) = 0xff84fff4ff84fff4; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_result[1]) = 0xff84fff4ff84fff4; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; +- __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff84fff4; +- *((int*)& __m128_op0[2]) = 0xff84fff4; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffff0; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff84fff4; +- *((int*)& __m128_op0[2]) = 0xff84fff4; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffff0; +- *((int*)& __m128_op1[3]) = 0xff84fff4; +- *((int*)& __m128_op1[2]) = 0xff84fff4; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xfffffff0; +- *((int*)& __m128_result[3]) = 0xffc4fff4; +- *((int*)& __m128_result[2]) = 0xffc4fff4; +- *((int*)& __m128_result[1]) = 0xffffffff; +- *((int*)& __m128_result[0]) = 0xfffffff0; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_d(__m128i_op0,-4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfbba01c0003f7e3f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_op1[1]) = 0xfbd884e7003f7e3f; +- *((unsigned long*)& __m256i_op1[0]) = 0xff874dc687870000; +- *((unsigned long*)& __m256i_result[3]) = 0xffe367cc82f8989a; +- *((unsigned long*)& __m256i_result[2]) = 0x4f90000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffc3aaa8d58f43c8; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffe367cc82f8989a; +- *((unsigned long*)& __m256d_op0[2]) = 0x4f90000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffc3aaa8d58f43c8; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256d_op1[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256d_op1[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; +- __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m256_op0[7]) = 0x1f0fdf7f; +- *((int*)& __m256_op0[6]) = 0x3e3b31d4; +- *((int*)& __m256_op0[5]) = 0x7ff80000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x1f0fdf7f; +- *((int*)& __m256_op0[2]) = 0x3e3b31d4; +- *((int*)& __m256_op0[1]) = 0x7ff80000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x7ff80000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x7ff80000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x30); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fefefe68; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000001; +- *((int*)& __m128_op0[2]) = 0xfffffffe; +- *((int*)& __m128_op0[1]) = 0x00000001; +- *((int*)& __m128_op0[0]) = 0xfffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x2a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1f0fdf7f3e3b31d4; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xe0f02081c1c4ce2c; +- *((unsigned long*)& __m256i_result[2]) = 0x8008000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xe0f02081c1c4ce2c; +- *((unsigned long*)& __m256i_result[0]) = 0x8008000000000000; +- __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffe367cc82f8989a; +- *((unsigned long*)& __m256i_op1[2]) = 0x4f90000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffc3aaa8d58f43c8; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000082f8989a; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000d58f43c8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x1f0fdf7f; +- *((int*)& __m256_op0[6]) = 0x3e3b31d4; +- *((int*)& __m256_op0[5]) = 0x7ff80000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x1f0fdf7f; +- *((int*)& __m256_op0[2]) = 0x3e3b31d4; +- *((int*)& __m256_op0[1]) = 0x7ff80000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x002a5429; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x002a5429; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc7418a023680; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0xffff8845bb954b00; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffc7418a023680; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_result[1]) = 0xffff8845bb954b00; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000002a5429; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000082f8989a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000d58f43c8; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010183f9999b; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x01010101d58f43c9; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffc741; +- *((int*)& __m256_op0[6]) = 0x8a023680; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffff8845; +- *((int*)& __m256_op0[2]) = 0xbb954b00; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffc74180000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff884580000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001fffffffe; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc74180000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff884580000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0xbf800000; +- *((int*)& __m256_result[6]) = 0xbf800000; +- *((int*)& __m256_result[5]) = 0xd662fa00; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0xbf800000; +- *((int*)& __m256_result[2]) = 0xbf800000; +- *((int*)& __m256_result[1]) = 0xd6ef7500; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xbf800000bf800000; +- *((unsigned long*)& __m256i_op1[2]) = 0xd662fa0000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xbf800000bf800000; +- *((unsigned long*)& __m256i_op1[0]) = 0xd6ef750000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x417e01f040800000; +- *((unsigned long*)& __m256i_result[2]) = 0x299d060000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x417e01f040800000; +- *((unsigned long*)& __m256i_result[0]) = 0x29108b0000000000; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe0f02081c1c4ce2c; +- *((unsigned long*)& __m256i_op0[2]) = 0x8008000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xe0f02081c1c4ce2c; +- *((unsigned long*)& __m256i_op0[0]) = 0x8008000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000b8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000b8; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x60f02081c1c4ce2c; +- *((unsigned long*)& __m256i_op0[2]) = 0x8008000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x60f02081c1c4ce2c; +- *((unsigned long*)& __m256i_op0[0]) = 0x8008000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010183f9999b; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x01010101d58f43c9; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010183f9999b; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x01010101d58f43c9; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000002a54290; +- __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101010183f9999b; +- *((unsigned long*)& __m256d_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[1]) = 0x01010101d58f43c9; +- *((unsigned long*)& __m256d_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_op1[3]) = 0x417e01f040800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x299d060000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x417e01f040800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x29108b0000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001000000010; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffefefe6a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fefefe6a; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x7c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fefefe6a; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fefefe6a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000fbf9; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fbf9; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000007f00000000; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_result[1]) = 0x5a5a5a5a5b5a5b5a; +- *((unsigned long*)& __m128i_result[0]) = 0x5a5a5a5a5b5a5b5a; +- __m128i_out = __lsx_vxori_b(__m128i_op0,0x5a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x5a5a5a5a5b5a5b5a; +- *((unsigned long*)& __m128i_op1[0]) = 0x5a5a5a5a5b5a5b5a; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001494b494a; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001494b494a; +- __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000007070700; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000002010202; +- __m128i_out = __lsx_vclo_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a5429; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000055; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000055; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = 
__lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x002a542a; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x002a542a; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffffffe; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000007070700; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000002010202; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000007070700; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000002010202; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010183f95466; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x01010101d58efe94; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010183f95466; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x01010101d58efe94; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xa7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000055; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000055; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff295329; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff295329; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff01010101; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00d6acd7; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff01010101; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00d6acd7; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010183f95466; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x01010101d58efe94; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000101000083f95; +- *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; +- *((unsigned long*)& __m256i_result[1]) = 0x00001010000d58f0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; +- __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000007f00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7ffffffeffffffff; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_h(__m128i_op0,10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff295329; +- *((unsigned long*)& __m256d_op0[1]) = 
0xfffffffffefefeff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffff295329; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[3]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_result[2]) = 0x001f001f02c442af; +- *((unsigned long*)& __m256i_result[1]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_result[0]) = 0x001f001f02c442af; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x7ffffffe; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); +- *((unsigned long*)& __m128i_op0[1]) = 0x00005a5a00005a5a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00005b5a00005b5a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5a5a5a5a5b5a5b5a; +- *((unsigned long*)& __m128i_op0[0]) = 0x5a5a5a5a5b5a5b5a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000005400; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000005400; +- __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000000fefefe6a; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000c2bac2c2; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000fefefe6a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c2bac2c2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fefefe6a; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c2bac2c2; +- __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00fe01f000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x00fe01f000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000007f8; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x2d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op0[2]) = 0x001f001f02c442af; +- *((unsigned long*)& __m256i_op0[1]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op0[0]) = 0x001f001f02c442af; +- *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffff295329; +- *((unsigned long*)& __m256i_op2[1]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffff295329; +- *((unsigned long*)& __m256i_result[3]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_result[1]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000c40086; +- __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x7ffffffeffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x01ff01ff01ff01ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x01ff01ff01ff01ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_du_wu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000002a542a; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x7ffffffe; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrml_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xfefefeff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xff295329; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xfefefeff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xff295329; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000004290; +- __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000002a96ba; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000004290; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000002a96ba; +- __m256i_out = 
__lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vclz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4080808080808080; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff295329; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff295329; +- *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_result[3]) = 0xfffe00f7ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff629d7; +- *((unsigned long*)& __m256i_result[1]) = 0xfffe00f7ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff629d7; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x001e001ea1bfa1bf; +- *((unsigned long*)& __m256d_op0[2]) = 0x001e001e83e5422e; +- *((unsigned long*)& __m256d_op0[1]) = 0x001e001ea1bfa1bf; +- *((unsigned long*)& __m256d_op0[0]) = 0x011f011f0244420e; +- *((unsigned long*)& __m256d_op1[3]) = 0xfffe00f7ffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffff629d7; +- *((unsigned long*)& __m256d_op1[1]) = 0xfffe00f7ffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffff629d7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x01ff01ff; +- *((int*)& __m128_op0[2]) = 0x01ff01ff; +- *((int*)& __m128_op0[1]) = 0x01ff01ff; +- *((int*)& __m128_op0[0]) = 0x01ff01ff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x01ff01ff; +- *((int*)& __m128_op0[2]) = 0x01ff01ff; +- *((int*)& __m128_op0[1]) = 0x01ff01ff; +- *((int*)& __m128_op0[0]) = 0x01ff01ff; +- *((int*)& __m128_result[3]) = 0xc2f80000; +- *((int*)& __m128_result[2]) = 0xc2f80000; +- *((int*)& __m128_result[1]) = 0xc2f80000; +- *((int*)& __m128_result[0]) = 0xc2f80000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ffffffeffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0xff80ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7ffffffeffffffff; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xe6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff80ffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x7ffffffe; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400; +- *((unsigned long*)& __m256i_op1[3]) = 0x001e001ea1bfa1bf; +- *((unsigned long*)& __m256i_op1[2]) = 0x001e001e83e5422e; +- *((unsigned long*)& __m256i_op1[1]) = 0x001e001ea1bfa1bf; +- *((unsigned long*)& __m256i_op1[0]) = 0x011f011f0244420e; +- *((unsigned long*)& __m256i_result[3]) = 0x000f000fd0dfd0df; +- *((unsigned long*)& __m256i_result[2]) = 0x000f000f83ef4b4a; +- *((unsigned long*)& __m256i_result[1]) = 0x000f000fd0dfd0df; +- *((unsigned long*)& __m256i_result[0]) = 0x0110011001224b07; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x83f95466; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x01010101; +- *((int*)& __m256_op0[0]) = 0x00005400; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xfefefeff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xff295329; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xfefefeff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xff295329; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000004290; +- *((unsigned long*)& 
__m256d_op0[2]) = 0x00000000002a96ba; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000004290; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000002a96ba; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000083f95466; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0101010100005400; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000004290; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000083f95466; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000004290; +- *((unsigned long*)& __m256d_result[0]) = 0x0101010100005400; +- __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000002a5; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000002a5; +- __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff295329; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefeff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff295329; +- *((unsigned long*)& __m256i_op1[3]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op1[1]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_result[3]) = 0x007f00f8ff7fff80; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fff6a9d8; +- *((unsigned long*)& __m256i_result[1]) = 0x007f00f8ff7fff80; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fff6a9d8; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x01ff01ff01ff01ff; +- *((unsigned long*)& __m128d_op0[0]) = 0x01ff01ff01ff01ff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x01ff01ff01ff01ff; +- *((unsigned long*)& __m128d_result[0]) = 0x01ff01ff01ff01ff; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400; +- *((unsigned long*)& __m256i_op1[3]) = 0x007f00f8ff7fff80; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff6a9d8; +- *((unsigned long*)& __m256i_op1[1]) = 0x007f00f8ff7fff80; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff6a9d8; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x007f00f8ff7fff80; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff6a9d8; +- *((unsigned long*)& __m256i_op1[1]) = 0x007f00f8ff7fff80; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff6a9d8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089; +- __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x02a54290; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x02a54290; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x02a54290; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x0154dc84; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x02a54290; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000089; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x82a54290; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x028aa700; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x82a54290; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& 
__m256_result[0]) = 0x02a54287; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000001ff000001ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000001ff000001ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000001ff000001ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000001ff000001ff; +- *((unsigned long*)& __m128i_op2[1]) = 0xff80ffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x7ffffffeffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000002fe800000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x7ffffe0100000000; +- __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a; +- __m128i_out = __lsx_vaddi_hu(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00fe01f0; +- *((int*)& __m256_op0[6]) = 0x00010000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00c40086; +- *((int*)& __m256_op0[3]) = 0x00fe01f0; +- *((int*)& __m256_op0[2]) = 0x00010000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00c40086; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x82a54290; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x028aa700; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x82a54290; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x02a54287; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00010000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00c40086; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00010000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00c40086; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000082a54290; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000028aa700; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000082a54290; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54287; +- *((unsigned long*)& __m256i_result[3]) = 0x007f00f841532148; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001a753c3; +- *((unsigned long*)& __m256i_result[1]) = 0x007f00f841532148; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001b52187; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00fe01f0; +- *((int*)& __m256_op0[6]) = 0x00010000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00c40086; +- *((int*)& __m256_op0[3]) = 0x00fe01f0; +- *((int*)& __m256_op0[2]) = 0x00010000; +- *((int*)& 
__m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00c40086; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089; +- __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x98ff98ff220e220d; +- *((unsigned long*)& __m128d_op0[0]) = 0xa2e1a2601ff01ff0; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vfrintrz_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000082a54290; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000028aa700; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000082a54290; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54287; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x803f800080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe0404041c0404040; +- int_op1 = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_result[0]) = 0xe0404041e0404041; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000400; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000002a542a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000242; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000242; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_op0[1]) = 0x00fe01f000010000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000c40086; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000c40086; +- __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_op0[0]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_op1[1]) = 0x803f800080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xe0404041c0404040; +- *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_result[0]) = 0x803f800080000000; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128d_op0[0]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_result[1]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_result[0]) = 0xe0404041e0404041; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000002a54290; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; +- *((int*)& __m128_result[3]) = 0x35200000; +- *((int*)& __m128_result[2]) = 0x35200000; +- *((int*)& __m128_result[1]) = 0x35200000; +- *((int*)& __m128_result[0]) = 0x35200000; +- __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_op0[0]) = 0x803f800080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000009; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_op0[0]) = 0x803f800080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff80ffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x7ffffffe; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xe0404041e0404041; +- *((unsigned long*)& __m128i_op1[0]) = 0x803f800080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x02a54290; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0154dc84; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x02a54290; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000089; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x02a54290; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x0154dc84; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x02a54290; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000089; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x02a54290; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x0154dc84; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x02a54290; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000089; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x59); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000089; +- __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[1]) = 0x0a00000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000154dc84; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000089; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff00000089; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002a54290; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op2[1]) = 0x000000004c7f4c7f; +- *((unsigned long*)& __m128i_op2[0]) = 0xe0c0c0c0d1c7d1c6; +- *((unsigned long*)& __m128i_result[1]) = 0x061006100613030c; +- *((unsigned long*)& __m128i_result[0]) = 0x4d6814ef9c77ce46; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0xfebcfebcfebcfebc; +- *((unsigned long*)& __m256i_result[2]) = 0xfebcfebcfebcfebc; +- *((unsigned long*)& __m256i_result[1]) = 0xfebcfebcfebcfebc; +- *((unsigned long*)& __m256i_result[0]) = 0xfebcfebcfebcfebc; +- __m256i_out = __lasx_xvldi(1724); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x061006100613030c; +- *((unsigned long*)& __m128i_op1[0]) = 0x4d6814ef9c77ce46; +- *((unsigned long*)& __m128i_result[1]) = 0x010f010f0112010b; +- *((unsigned long*)& __m128i_result[0]) = 0x016701ee01760145; +- __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000001fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000001fe; +- __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000fd0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000fd0000; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x29); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0a000a000a000a00; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[1]) = 0xfff6fff6fff6fff6; +- *((unsigned long*)& __m128i_result[0]) = 0xfff6fff6fff6fff6; +- __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x010f00000111fffc; +- *((unsigned long*)& __m128i_op0[0]) = 0x016700dc0176003a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0a000a000a000a00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5; +- *((unsigned long*)& __m256d_op1[2]) = 0xa5a5a5a5a5a5a5ff; +- *((unsigned long*)& __m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5; +- *((unsigned long*)& __m256d_op1[0]) = 0xa5a5a5a5a5a5a5ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x36); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a; +- __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fd0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fd0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001b0000001b; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001b00fd0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001b0000001b; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001b00fd0000; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000001b0000001b; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001b00fd0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000001b0000001b; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001b00fd0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m256i_result[2]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m256i_result[1]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m256i_result[0]) = 0xf8f8f8f8f8f8f8f8; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000fd0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fd0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007f0000; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, 
__m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fe0100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fe0100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000a0000000a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000a00000009; +- *((unsigned long*)& __m128i_result[1]) = 0x000a000a0000000a; +- *((unsigned long*)& __m128i_result[0]) = 0x000a000a000a000a; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xaf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5; +- *((unsigned long*)& __m256d_op1[2]) = 0xa5a5a5a5a5a99e03; +- *((unsigned long*)& __m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5; +- *((unsigned long*)& __m256d_op1[0]) = 0xa5a5a5a5a5a99e03; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; 
+- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000a0000000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000a00000009; +- *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a000a0a0a00; +- *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0009090900; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fe0100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fe0100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000a000a00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000a000a00000000; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000feb60000b7d0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000feb60000c7eb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000feb60000b7d0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000feb60000c7eb; +- 
*((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0a0a0a000a0a0a00; +- *((unsigned long*)& __m128d_op1[0]) = 0x0a0a0a0009090900; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000feb60000b7d0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000feb60000c7eb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000feb60000b7d0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000feb60000c7eb; +- *((unsigned long*)& __m256i_result[3]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_result[2]) = 0x0707feb60707c7eb; +- *((unsigned long*)& __m256i_result[1]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_result[0]) = 0x0707feb60707c7eb; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m128i_op1[0]) = 0x000a000a000a000a; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0040000000400000; +- *((unsigned long*)& __m128i_result[0]) = 0x0040000000400000; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001900000019; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000fe0100000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000fe0100000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000001900000019; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000001900000019; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000001900000019; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0040000000400000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0040000000400000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_result[0]) = 0x0141010101410101; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_op1[0]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_result[1]) = 0xfebffefffebffeff; +- *((unsigned long*)& __m128i_result[0]) = 0xfebffefffebffeff; +- __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op0[2]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_op0[1]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op0[0]) = 0x45baa7ef6a95a985; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_result[2]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_result[1]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_result[0]) = 0x45baa7ef6a95a985; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_op1[0]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6420e0208400c4c4; +- *((unsigned long*)& __m128i_op0[0]) = 0x20c4e0c4e0da647a; +- *((unsigned long*)& __m128i_result[1]) = 0x6420e0208400c4e3; +- *((unsigned long*)& __m128i_result[0]) = 0x20c4e0c4e0da6499; +- __m128i_out = __lsx_vaddi_du(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x6420e020; +- *((int*)& __m128_op0[2]) = 0x8400c4e3; +- *((int*)& __m128_op0[1]) = 0x20c4e0c4; +- *((int*)& __m128_op0[0]) = 0xe0da6499; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_op1[1]) = 0xfebffefffebffeff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfebffefffebffeff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000001b0000001b; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001b00fd0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000001b0000001b; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001b00fd0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000019; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000019; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0707feb608c9328b; +- *((unsigned long*)& __m256i_op0[2]) = 0xc237bd65fc892985; +- *((unsigned long*)& __m256i_op0[1]) = 0x0707feb608c9328b; +- *((unsigned long*)& __m256i_op0[0]) = 0xc237bd65fc892985; +- *((unsigned long*)& __m256i_op1[3]) = 0x00150015003a402f; +- *((unsigned long*)& __m256i_op1[2]) = 0x333568ce26dcd055; +- *((unsigned long*)& __m256i_op1[1]) = 0x00150015003a402f; +- *((unsigned long*)& __m256i_op1[0]) = 0x333568ce26dcd055; +- *((unsigned long*)& __m256i_result[3]) = 0x0e0f1192846ff912; +- *((unsigned long*)& __m256i_result[2]) = 0x002a0074666a4db9; +- *((unsigned long*)& __m256i_result[1]) = 0x0e0f1192846ff912; +- *((unsigned long*)& __m256i_result[0]) = 0x002a0074666a4db9; +- __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00150015003a402f; +- *((unsigned long*)& __m256i_op0[2]) = 0x333568ce26dcd055; +- *((unsigned long*)& __m256i_op0[1]) = 0x00150015003a402f; +- *((unsigned long*)& __m256i_op0[0]) = 0x333568ce26dcd055; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000007d0d0d0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000007d0d0d0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned 
long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000007d0d0d0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000007d0d0d0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000007d0d0d00000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000007d0d0d00000; +- __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001b0000001b; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001b00fd0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001b0000001b; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001b00fd0000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000001b; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000001b; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000001b; +- *((unsigned long*)& __m256i_result[0]) = 0x000000fd00000000; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000be00be; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x1f1b917c9f3d5e05; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0e0f1192846ff912; +- *((unsigned long*)& __m256i_op0[2]) = 0x002a0074666a4db9; +- *((unsigned long*)& __m256i_op0[1]) = 0x0e0f1192846ff912; +- *((unsigned long*)& __m256i_op0[0]) = 0x002a0074666a4db9; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000018; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000018; +- *((unsigned long*)& __m256i_result[3]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000100000018; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000100000018; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x1f60000000c00000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256d_result[0]) = 0x1f60000000c00000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x1f1b917c; +- *((int*)& __m128_op0[0]) = 0x9f3d5e05; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x4fa432d6; +- *((int*)& __m128_result[0]) = 0x7fc00000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1f60000000c00000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1f60000000c00000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x60000000c0000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x60000000c0000000; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff003f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff003f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000627; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000627; +- __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_result[3]) = 0x400040003abf4000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_result[1]) = 0x400040003abf4000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000627; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000627; +- *((unsigned long*)& __m256i_op2[3]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_op2[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000003fff3fff; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_b(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007fff003f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff003f; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fff; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000627; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000627; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1f60000000c00000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x1f60000000c00000; +- *((unsigned long*)& __m256i_op2[3]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000627; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000627; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4180418041804180; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128d_op0[0]) = 0x4fa432d67fc00000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0141010101410101; +- *((unsigned long*)& __m128d_op1[0]) = 0x0141010101410101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0408040800000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0408040800000004; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000084; +- *((unsigned long*)& __m256i_result[3]) = 0x0084008400840084; +- *((unsigned long*)& __m256i_result[2]) = 0x0084008400840084; +- *((unsigned long*)& __m256i_result[1]) = 0x0084008400840084; +- *((unsigned long*)& __m256i_result[0]) = 0x0084008400840084; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff05407fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00001fff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00001fff; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000800; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000100001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000100001; +- __m256i_out = __lasx_xvclz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3e1f321529232736; +- *((unsigned long*)& __m128i_op1[0]) = 0x161d0c373c200826; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000082020201; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000820200000201; +- __m128i_out = __lsx_vexth_wu_hu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x65b780a3ae3bf8cb; +- *((unsigned long*)& __m128i_op0[0]) = 0x161d0c363c200826; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x65b780a2ae3bf8ca; +- *((unsigned long*)& __m128i_result[0]) = 0x161d0c373c200827; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x3fff3fff; +- *((int*)& __m256_op0[6]) = 0x3fff3fff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x3fff3fff; +- *((int*)& __m256_op0[3]) = 0x3fff3fff; +- *((int*)& __m256_op0[2]) = 0x3fff3fff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x3fff3fff; +- *((int*)& __m256_op1[7]) = 0x017e01fe; +- *((int*)& __m256_op1[6]) = 0x01fe01fe; +- *((int*)& __m256_op1[5]) = 0x05860606; +- *((int*)& __m256_op1[4]) = 0x01fe0202; +- *((int*)& __m256_op1[3]) = 0x017e01fe; +- *((int*)& __m256_op1[2]) = 0x01fe0000; +- *((int*)& __m256_op1[1]) = 0x05860606; +- *((int*)& __m256_op1[0]) = 0x01fe0004; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x017e01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0586060601fe0202; +- *((unsigned long*)& __m256i_op1[1]) = 0x017e01fe01fe0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0586060601fe0004; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffbfffafffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffbfffaffff0000; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x65b780a2ae3bf8ca; +- *((unsigned long*)& __m128i_op1[0]) = 0x161d0c373c200827; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000001ff; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x017e01fe01fe01fe; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0586060601fe0202; +- *((unsigned long*)& __m256i_op0[1]) = 0x017e01fe01fe0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0586060601fe0004; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010001000100001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010001000100001; +- *((unsigned long*)& __m256i_result[3]) = 0x017f01fe01ff01fe; +- *((unsigned long*)& __m256i_result[2]) = 0x05960616020e0203; +- *((unsigned long*)& __m256i_result[1]) = 0x017f01fe01ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x05960616020e0005; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x017f01fe01ff01fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x05960616020e0203; +- *((unsigned long*)& __m256i_op0[1]) = 0x017f01fe01ff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x05960616020e0005; +- *((unsigned long*)& __m256i_op1[3]) = 0x017f01fe01ff01fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x05960616020e0203; +- *((unsigned long*)& __m256i_op1[1]) = 0x017f01fe01ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x05960616020e0005; +- *((unsigned long*)& __m256i_result[3]) = 0x00fe01fc01fe01fc; +- *((unsigned long*)& __m256i_result[2]) = 0x012c002c001c0006; +- *((unsigned long*)& __m256i_result[1]) = 0x00fe01fc01fe0000; +- *((unsigned long*)& __m256i_result[0]) = 0x012c002c001c000a; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff4000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000403f3fff; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffbfffafffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffbfffaffff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00fe01fc01fe01fc; +- *((unsigned long*)& __m256i_op1[2]) = 0x012c002c001c0006; +- *((unsigned long*)& __m256i_op1[1]) = 0x00fe01fc01fe0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x012c002c001c000a; +- *((unsigned long*)& __m256i_result[3]) = 0x807e80fd80fe80fd; +- *((unsigned long*)& __m256i_result[2]) = 0x80938013800d8002; +- *((unsigned long*)& __m256i_result[1]) = 0x807e80fd80fe0000; +- *((unsigned long*)& __m256i_result[0]) = 0x80938013800d0005; +- __m256i_out = 
__lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00fe01fc01fe01fc; +- *((unsigned long*)& __m256i_op0[2]) = 0x012c002c001c0006; +- *((unsigned long*)& __m256i_op0[1]) = 0x00fe01fc01fe0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x012c002c001c000a; +- long_int_result = 0x00fe01fc01fe0000; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x807e80fd80fe80fd; +- *((unsigned long*)& __m256i_op0[2]) = 0x80938013800d8002; +- *((unsigned long*)& __m256i_op0[1]) = 0x807e80fd80fe0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80938013800d0005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00001fff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00001fff; +- *((unsigned long*)& __m256i_result[3]) = 0x807e80fd80fe80fd; +- *((unsigned long*)& __m256i_result[2]) = 0x80938013800d8002; +- *((unsigned long*)& __m256i_result[1]) = 0x807e80fd80fe0000; +- *((unsigned long*)& __m256i_result[0]) = 0x80938013800d0005; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_result[3]) = 0x38f7414938f7882f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x38f7414938f78830; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x64b680a2ae3af8ca; +- *((unsigned long*)& __m128i_op0[0]) = 0x161c0c363c200826; +- *((unsigned long*)& __m128i_result[1]) = 0x64b680a2ae3af8c8; +- *((unsigned long*)& __m128i_result[0]) = 0x161c0c363c200824; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x64b680a2ae3af8c8; +- *((unsigned long*)& __m128i_op1[0]) = 0x161c0c363c200824; +- *((unsigned long*)& __m128i_result[1]) = 0x23b57fa16d39f7c8; +- *((unsigned long*)& __m128i_result[0]) = 0x161c0c363c200824; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff; +- *((unsigned long*)& __m128i_result[1]) = 0x4101010141010100; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000001ff; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x807e80fd80fe80fd; +- *((unsigned long*)& __m256i_op0[2]) = 0x80938013800d8002; +- *((unsigned long*)& __m256i_op0[1]) = 0x807e80fd80fe0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80938013800d0005; +- *((unsigned long*)& __m256i_result[3]) = 0x8091811081118110; +- *((unsigned long*)& __m256i_result[2]) = 0x80a6802680208015; +- *((unsigned long*)& __m256i_result[1]) = 0x8091811081110013; +- *((unsigned long*)& __m256i_result[0]) = 0x80a6802680200018; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x0707feb6; +- *((int*)& __m256_op0[6]) = 0x0707b7d0; +- *((int*)& __m256_op0[5]) = 0x45baa7ef; +- *((int*)& __m256_op0[4]) = 0x6a95a985; +- *((int*)& __m256_op0[3]) = 0x0707feb6; +- *((int*)& __m256_op0[2]) = 0x0707b7d0; +- *((int*)& __m256_op0[1]) = 0x45baa7ef; +- *((int*)& __m256_op0[0]) = 0x6a95a985; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000017547fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000017547fffffff; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0408040800008003; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0408040800008002; +- *((unsigned long*)& __m256i_result[0]) = 0xfbf7fbf7ffff7ffd; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0x23b57fa16d39f7c8; +- *((unsigned long*)& __m128i_op1[0]) = 0x161c0c363c200824; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; +- __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x34); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000017547fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000017547fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x807e80fd80fe80fd; +- *((unsigned long*)& __m256i_op1[2]) = 0x80938013800d8002; +- *((unsigned long*)& __m256i_op1[1]) = 0x807e80fd80fe0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x80938013800d0005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000801380f380fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000801380f300fb; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4101010141010100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0020808100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x29); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x38f7414938f7882f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x38f7414938f78830; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000801380f380fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000801380f300fb; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x2c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0408040800008003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff80800; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0408040800008003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x04080408fff87803; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000800; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff7fedffffff05; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000fffd; +- *((int*)& __m128_op1[3]) = 0x7fffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000001ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op1[2]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_op1[1]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op1[0]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_result[3]) = 0x0707b7cff8f84830; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000354ad4c28; +- *((unsigned long*)& __m256i_result[1]) = 0x0707b7cff8f84830; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000354ad4c28; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff82bb9784; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc6bb97ac; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007ffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0408040800008003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x04080408fff87803; +- *((unsigned long*)& __m256i_op1[3]) = 0x0707b7cff8f84830; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000354ad4c28; +- *((unsigned long*)& __m256i_op1[1]) = 0x0707b7cff8f84830; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000354ad4c28; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffd5a98; +- __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffd5a98; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffd5a98; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007f3a40; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0020808100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& 
__m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x3fff3fff3fff4000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000403f3fff; +- *((unsigned long*)& __m256i_result[3]) = 0x7ffe7ffe7ffe7ffe; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007ffe7ffe; +- *((unsigned long*)& __m256i_result[1]) = 0x7ffe7ffe7ffe8000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000807e7ffe; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fff3fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x3fff3fff3fff4000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000403f3fff; +- *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[0]) = 0x3fff3fff3fff3fff; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffd5a98; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000101ff01; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fffd; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff000000ff; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8091811081118110; +- *((unsigned long*)& __m256i_op0[2]) = 0x80a6802680208015; +- *((unsigned long*)& __m256i_op0[1]) = 0x8091811081110013; +- *((unsigned long*)& __m256i_op0[0]) = 0x80a6802680200018; +- *((unsigned long*)& __m256i_op1[3]) = 0x8091811081118110; +- *((unsigned long*)& __m256i_op1[2]) = 0x80a6802680208015; +- *((unsigned long*)& __m256i_op1[1]) = 0x8091811081110013; +- *((unsigned long*)& __m256i_op1[0]) = 0x80a6802680200018; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000101ff01; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffff6fffffff6; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff6fffffff6; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff00000000000001; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfe80000000000001; +- __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000101ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x8091811081118110; +- *((unsigned long*)& __m256i_op1[2]) = 0x80a6802680208015; +- *((unsigned long*)& __m256i_op1[1]) = 0x8091811081110013; +- *((unsigned long*)& __m256i_op1[0]) = 0x80a6802680200018; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffefffe0000feff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffeff0000007e7f; +- __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ffe7ffe7ffe7ffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007ffe7ffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ffe7ffe7ffe8000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000807e7ffe; +- *((unsigned long*)& __m256i_result[3]) = 0x7ffe7ffe7ffe7ffe; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007ffe7ffe; +- *((unsigned long*)& __m256i_result[1]) = 0x7ffe7ffe7ffe8000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000807e7ffe; +- __m256i_out = 
__lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000008013; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000080f3; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fb; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op0[2]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_op0[1]) = 0x0707feb60707b7d0; +- *((unsigned long*)& __m256i_op0[0]) = 0x45baa7ef6a95a985; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ffe7ffd7ffe7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ffe7ffd7ffe8001; +- *((unsigned long*)& __m256i_result[3]) = 0x0707feb70707b7d1; +- *((unsigned long*)& __m256i_result[2]) = 0x65baa7efea95a985; +- *((unsigned long*)& __m256i_result[1]) = 0x0707feb70707b7d1; +- *((unsigned long*)& __m256i_result[0]) = 0x65baa7ef6a95a987; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000000; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f3a40; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff82bb9784; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffc6bb97ac; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff82bb9784; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffc6bb97ac; +- __m128i_out = 
__lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000004000000; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff82bb9784; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc6bb97ac; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x7fffffff82bb9784; +- *((unsigned long*)& __m128i_op2[0]) = 0x7fffffffc6bb97ac; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff82bb9784; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffc6bb97ac; +- __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xfe800000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((int*)& __m128_op1[3]) = 0x7fffffff; +- *((int*)& __m128_op1[2]) = 0x82bb9784; +- *((int*)& __m128_op1[1]) = 0x7fffffff; +- *((int*)& __m128_op1[0]) = 0xc6bb97ac; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe80000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& 
__m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfe80000000000001; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000027f000000fe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000018000000000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_d(__m128i_op0,5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f3a40; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f3a40; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000d24; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000801380f380fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000801380f300fb; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f3a40; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x42); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00fe000000000000; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007ffe7ffe; 
+- *((unsigned long*)& __m256i_op0[1]) = 0x7ffe7ffe7ffe8000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000807e7ffe; +- *((unsigned long*)& __m256i_result[3]) = 0x7f7e7f7e7f7e7f7e; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007f7e7f7e; +- *((unsigned long*)& __m256i_result[1]) = 0x7f7e7f7e7f7e0000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007e7f7e; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00fe000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x027e0000000000ff; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00fdffffffffff02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe80000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfe80ffffffffff02; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe80; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x30); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x027e0000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfe80ffffffffff02; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fffffff; +- __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000d24; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000d24; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfe80ff80ffff0000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000013; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001000000fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000013; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001000000fb; +- *((unsigned long*)& __m256i_result[3]) = 0x8080808180808093; +- *((unsigned long*)& __m256i_result[2]) = 0x80808081808080fe; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808180808093; +- *((unsigned long*)& __m256i_result[0]) = 0x80808081808080fb; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000d24; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8080808180808093; +- *((unsigned long*)& __m256i_op0[2]) = 0x80808081808080fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x8080808180808093; +- *((unsigned long*)& __m256i_op0[0]) = 0x80808081808080fb; +- *((unsigned long*)& __m256i_result[3]) = 0xf5f5f5f5f5f5f5f5; +- *((unsigned long*)& __m256i_result[2]) = 0xf5f5f5f5f5f5f5fe; +- *((unsigned long*)& __m256i_result[1]) = 0xf5f5f5f5f5f5f5f5; +- *((unsigned long*)& __m256i_result[0]) = 0xf5f5f5f5f5f5f5fb; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vslli_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0505050505050505; +- *((unsigned long*)& __m128i_result[0]) = 0x0505050504040404; +- __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00010013000100fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00010013000100fb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_result[0]) = 0x0400040004000400; +- __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe80ffffffffff02; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f3f018000000000; +- __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xf5f5f5f5f5f5f5f5; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xf5f5f5f5f5f5f5f5; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000004000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[2]) = 0xff04ff00ff00ff00; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[0]) = 0xff04ff00ff00ff00; +- __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- long_int_result = 0x0000000000000000; +- long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1); +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x7f3f0180; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000800000098; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000040000ffca; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000800000098; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000040000ff79; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x04000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x04000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000800000098; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000040000ffca; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000800000098; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000040000ff79; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[2]) = 0xff04ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[0]) = 0xff04ff00ff00ff00; +- *((unsigned long*)& __m256i_result[3]) = 0x000000008000000a; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000008000000a; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x44); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000010000003f; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f007f007f007f00; +- *((unsigned long*)& 
__m128i_result[0]) = 0x000000010000003f; +- __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[2]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op0[0]) = 0x0400040004000400; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xf5f5f5f5f5f5f5f5; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xf5f5f5f5f5f5f5f5; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xf9f5f9f5f9f5f9f5; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xf9f5f9f5f9f5f9f5; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xf5f5f5f5f5f5f5f5; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xf5f5f5f5f5f5f5f5; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x8000000a; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- 
*((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x8000000a; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000010000003f; +- *((unsigned long*)& __m128d_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000010000003f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[2]) = 0xff04ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[0]) = 0xff04ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f; +- 
*((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x7fff7fff; +- *((int*)& __m128_op0[2]) = 0x7fff7fff; +- *((int*)& __m128_op0[1]) = 0x00000001; +- *((int*)& __m128_op0[0]) = 0x0000003f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000010000003f; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000010000003f; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000010000003f; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f; +- __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f007f007f007f00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff0003003f; +- __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000030000003f; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xfffffffe; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000030000003f; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x6a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000002; +- *((int*)& __m256_op0[6]) = 0x00000002; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000002; +- *((int*)& __m256_op0[2]) = 0x00000002; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000003; +- *((int*)& __m128_op0[0]) = 0x0000003f; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000003; +- *((int*)& __m128_op1[0]) = 0x0000003f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe00000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff01010105; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x80000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0000fffe; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffff00; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe0000fffe; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fffe0000fffe; +- __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc1bdceee242070db; +- *((unsigned long*)& __m128i_op1[0]) = 0xe8c7b756d76aa478; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001800390049ffaa; +- *((unsigned long*)& __m128i_op0[0]) = 0x0029ff96005cff88; +- *((unsigned long*)& __m128i_result[1]) = 0x001800390049ffaa; +- *((unsigned long*)& __m128i_result[0]) = 0x0029ff96005cff88; +- __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x001800390049ffaa; +- *((unsigned long*)& __m128i_op1[0]) = 0x0029ff96005cff88; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff88; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; +- *((unsigned long*)& 
__m256i_result[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op1[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[3]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001800000039; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000049ffffffaa; +- *((unsigned long*)& __m128i_result[1]) = 0x000000060000000e; +- *((unsigned long*)& __m128i_result[0]) = 0x000000127fffffea; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x22); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001800390049ffaa; +- *((unsigned long*)& __m128i_op0[0]) = 0x0029ff96005cff88; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00060012000e002b; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000049ffffffaa; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000e002b; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffaa; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff88; +- *((unsigned long*)& __m128i_result[1]) = 0xe5e5e5e5e5e5e5e5; +- *((unsigned long*)& __m128i_result[0]) = 0xe5e5e5e5e4e4e46d; +- __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00060012000e002b; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000049ffffffaa; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000127fffffea; +- *((unsigned long*)& __m128i_result[1]) = 0x000000060000000e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001201fe01e9; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000060000000e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001201fe01e9; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001201fe01e9; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000c0000001c; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002403fc03d2; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& 
__m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000060000000e; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000127fffffea; +- *((unsigned long*)& __m128i_result[1]) = 0x7f0101070101010f; +- *((unsigned long*)& __m128i_result[0]) = 0x000000127f010116; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe5e5e5e5e5e5e5e5; +- *((unsigned long*)& __m128i_op0[0]) = 0xe5e5e5e5e4e4e46d; +- *((unsigned long*)& __m128i_result[1]) = 0xe5e5e5e5e5e5e5e5; +- *((unsigned long*)& __m128i_result[0]) = 0xe5e5e5e5e4e4e46d; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfff8fff8fff8fff8; +- __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf2f2e5e5e5e5e5e5; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xf2f2e5e5e5e5e5dc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xf2f2e5e5; +- *((int*)& __m128_op0[2]) = 0xe5e5e5e5; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xe5e5e5e5; +- *((int*)& __m128_op1[2]) = 0xe5e5e5e5; +- *((int*)& __m128_op1[1]) = 0xe5e5e5e5; +- *((int*)& __m128_op1[0]) = 0xe4e4e46d; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[0]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf2f2e5e5e5e5e5dc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff80ff80ff80ff80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff80ff80ff80ff80; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f0101070101010f; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000127f010116; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffffff; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000ffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x120e120dedf1edf2; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x120e120dedf1edf2; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x8001800180018001; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x8001800180018001; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128d_op0[1]) = 0xffffffff80000001; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x120e120dedf1edf2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x120e120dedf1edf2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000120e120d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000120e120d; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000020000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000020000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000020000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000020000000000; +- __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x29); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& 
__m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000120e120d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000120e120d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000907; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000907; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x67); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000200; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000200; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000200; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000200; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& 
__m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x89); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- unsigned_long_int_result = 0xffffffffffffffff; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000907; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000907; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007e007e007e007e; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000907; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000907; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; +- __m256i_out = 
__lasx_xvnori_b(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3fffffffc0000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xbfffbfffbfffbffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000907; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000907; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x3fffffffc0000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000200; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000200; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000200; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000200; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000009; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000009; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000009; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- int_result = 0xffffffffffffffff; +- int_out = __lsx_vpickve2gr_b(__m128i_op0,0xc); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op2[1]) = 0xbfffbfffbfffbffe; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4000400040004002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- unsigned_int_result = 0x00000000ffffffff; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- long_int_result = 0xffffffffffffffff; +- long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbfffbfffbfffbffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xbfffbfffbfffbffe; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xbfffbfffbfffbffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001ffff0001ffff; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000045f3fb; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000045f3fb; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; +- 
__m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000008080809; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000008080809; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000008080809; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000008080809; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x5353535353535353; +- *((unsigned long*)& __m256i_result[2]) = 0x5353535353535353; +- *((unsigned long*)& __m256i_result[1]) = 0x5353535353535353; +- *((unsigned long*)& __m256i_result[0]) = 0x5353535353535353; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0x53); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000045f3fb; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000045f3fb; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffba0c05; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffba0c05; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128d_op1[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256d_op0[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256d_op0[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256d_op0[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffba0c05; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffba0c05; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffba0c05; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffba0c05; +- __m256d_out = 
__lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0x37); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfe01fe01fe01fe01; +- *((unsigned long*)& __m128i_result[0]) = 0xfe01fe01fe01fe01; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe01fe01fe01fe01; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe01fe01fe01fe01; +- *((unsigned long*)& __m128i_op1[1]) = 0xfe01fe01fe01fe01; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe01fe01fe01fe01; +- *((unsigned long*)& __m128i_op2[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op2[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_result[1]) = 0xf10cf508f904fd01; +- *((unsigned long*)& __m128i_result[0]) = 0xf10cf508f904fd01; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffba0c05; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffba0c05; +- *((unsigned long*)& __m256i_op1[3]) = 0x5353535353535353; +- *((unsigned long*)& __m256i_op1[2]) = 0x5353535353535353; +- *((unsigned long*)& __m256i_op1[1]) = 0x5353535353535353; +- *((unsigned long*)& __m256i_op1[0]) = 0x5353535353535353; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0303030303020000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0303030303020000; +- __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- 
__m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_result[1]) = 0xff807f807f807f80; +- *((unsigned long*)& __m128i_result[0]) = 0xff807f807f807f80; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff807f807f807f80; +- *((unsigned long*)& __m128i_op0[0]) = 0xff807f807f807f80; +- *((unsigned long*)& __m128i_result[1]) = 0xfb807b807b807b80; +- *((unsigned long*)& __m128i_result[0]) = 0xfb807b807b807b80; +- __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0xf10cf508f904fd01; +- *((unsigned long*)& __m128i_op1[0]) = 0xf10cf508f904fd01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf10cf508f904fd01; +- *((unsigned long*)& __m128i_op0[0]) = 0xf10cf508f904fd01; +- *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[2]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[1]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[0]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op2[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op2[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op2[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op2[0]) = 0xf7f7f7f7f7f7f7f7; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0303030303020000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0303030303020000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10; +- *((unsigned 
long*)& __m128i_op0[0]) = 0xfffff208fffffa02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007f017f01; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007f017f01; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007f017f01; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007f017f01; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000007f017f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000007f017f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[2]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[1]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[0]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[3]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[2]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[1]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[0]) = 0x03f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[3]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_result[2]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_result[1]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_result[0]) = 0x07efefefefefefee; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000045f3fb; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000045f3fb; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004500f300fb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004500f300fb; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_d(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff9; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff9; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[0]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[0]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001fbfbfc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001fbfbfc; +- __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x62); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_result[2]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_result[1]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_result[0]) = 0x0010000000100000; +- __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x5959595959595959; +- *((unsigned long*)& __m128i_result[0]) = 0x5959595959595959; +- __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x59); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffb80000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffb80000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe218ffffea10; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff208fffffa02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xfffff208fffffa02; +- *((unsigned long*)& __m128i_result[1]) = 0xffffe218ffffea10; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff208fffffa02; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000004500f300fb; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000004500f300fb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffb80000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffb80000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- 
*((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[2]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[1]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_op1[0]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x07efefefefefefee; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x07efefefefefefee; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xf8f8e018f8f8e810; +- *((unsigned long*)& __m128i_op1[0]) = 0xf8f8f008f8f8f800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000045000d0005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000045000d0005; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xf8f8e018f8f8e810; +- *((unsigned long*)& __m128i_op1[0]) = 0xf8f8f008f8f8f800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000e0180000e810; +- *((unsigned long*)& __m128i_result[0]) = 0x0000f0080000f800; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_result[3]) = 0x1010000010100000; +- *((unsigned long*)& __m256i_result[2]) = 0x1010000010100000; +- *((unsigned long*)& __m256i_result[1]) = 0x1010000010100000; +- *((unsigned long*)& __m256i_result[0]) = 0x1010000010100000; +- 
__m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000004800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000004500f300fb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000004800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000004500f300fb; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000004800000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000004500f300fb; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000004800000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000004500f300fb; +- *((unsigned long*)& __m256i_result[3]) = 0x7b7b7b7b80000000; +- *((unsigned long*)& __m256i_result[2]) = 0xcacacb1011040500; +- *((unsigned long*)& __m256i_result[1]) = 0x7b7b7b7b80000000; +- *((unsigned long*)& __m256i_result[0]) = 0xcacacb1011040500; +- __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0010000000100000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0010000000100000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0010000000100000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0010000000100000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x41cc5bb8a95fd1eb; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x41cc5bb8a95fd1eb; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x41cc5bb8a95fd1eb; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x41cc5bb8a95fd1eb; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7b7b7b7b80000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xcacacb1011040500; +- *((unsigned long*)& __m256i_op1[1]) = 0x7b7b7b7b80000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xcacacb1011040500; +- *((unsigned long*)& __m256i_result[3]) = 0x49cc5bb8a95fd1eb; +- *((unsigned long*)& __m256i_result[2]) = 0x7ff4080102102001; +- *((unsigned long*)& __m256i_result[1]) = 0x49cc5bb8a95fd1eb; +- *((unsigned long*)& __m256i_result[0]) = 0x7ff4080102102001; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000e0180000e810; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000f0080000f800; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000e0180000e810; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000f0080000f800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_op0[2]) = 
0x00000000000d0005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010146; +- *((unsigned long*)& __m256i_result[2]) = 0x01010101010e0106; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010146; +- *((unsigned long*)& __m256i_result[0]) = 0x01010101010e0106; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000e0180000e810; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000f0080000f800; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000e0180000e810; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000f0080000f800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000f0f800; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_result[2]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_result[1]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_result[0]) = 0x0010000000100000; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0010000000100000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000000000; +- __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfrint_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00100000; +- *((int*)& __m256_op0[6]) = 0x00100000; +- *((int*)& __m256_op0[5]) = 0x00100000; +- *((int*)& __m256_op0[4]) = 0x00100000; +- *((int*)& __m256_op0[3]) = 0x00100000; +- *((int*)& __m256_op0[2]) = 0x00100000; +- *((int*)& __m256_op0[1]) = 0x00100000; +- *((int*)& __m256_op0[0]) = 0x00100000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vclz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00080000002c0000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0008000000080000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00080000002c0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0008000000080000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00080000002c0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00080000002c0000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0005; +- __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; +- __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x50); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1211100f11100f0e; +- *((unsigned long*)& __m128i_op0[0]) = 0x100f0e0d0f0e0d0c; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010000000100000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000483800; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000583800; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000100000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000583800; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000100000; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_op1[0]) = 0x0f000d200e000c20; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x11000f200f000d20; +- __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; +- *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20; +- __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x11000f20; +- *((int*)& __m128_op0[2]) = 0x10000e20; +- *((int*)& __m128_op0[1]) = 0x0f000d20; +- *((int*)& __m128_op0[0]) = 0x0e000c20; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000d000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000d000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000583800; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000583800; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 
0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_b(__m128i_op0,-6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000045; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000000d0005; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000045; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000000d0005; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000045; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000000d0005; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000045; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000045; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d0005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000013b13380; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000013b13380; +- __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_result[0]) = 0x0f000d200e000c20; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x11000f20; +- *((int*)& __m128_op0[2]) = 0x10000e20; +- *((int*)& __m128_op0[1]) = 0x0f000d20; +- *((int*)& __m128_op0[0]) = 0x0e000c20; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x11000f2000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0f000d2000000000; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x11000f2010000e20; +- *((unsigned long*)& __m128i_op0[0]) = 0x0f000d200e000c20; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xe3e3e3e3e3e3e3e3; +- *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; +- __m128i_out = __lsx_vxori_b(__m128i_op0,0xe3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xff00ffffff00ffff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xff00ffffff00ffff; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d0000; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[2]) = 0x4000404040004040; +- *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[0]) = 0x4000404040004040; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x40); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0008000000080000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0008000000080000; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; +- *((unsigned long*)& __m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((int*)& __m128_result[3]) = 0x4f800000; +- *((int*)& __m128_result[2]) = 0x4f800000; +- *((int*)& __m128_result[1]) = 0x4f800000; +- *((int*)& __m128_result[0]) = 0x4f800000; +- __m128_out = __lsx_vffint_s_wu(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe3e3e3e3e3e3e3e3; +- *((unsigned long*)& __m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; +- *((unsigned long*)& __m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00ffffff00ffff; +- *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffe01fe01f; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffe01fe01f; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffe01fe01f; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffe01fe01f; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xff00ffff; +- *((int*)& __m256_op0[6]) = 0xff00ffff; +- *((int*)& __m256_op0[5]) = 0xff00ffff; +- *((int*)& __m256_op0[4]) = 0xff00ffff; +- *((int*)& __m256_op0[3]) = 0xff00ffff; +- *((int*)& __m256_op0[2]) = 0xff00ffff; +- *((int*)& __m256_op0[1]) = 0xff00ffff; +- *((int*)& __m256_op0[0]) = 0xff00ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000fe01020b0001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000fe01020b0001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, 
__m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x4f8000004f800000; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x64); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f8000004f800000; +- *((unsigned long*)& __m128d_result[1]) = 0x43d3e0000013e000; +- *((unsigned long*)& __m128d_result[0]) = 0x43d3e0000013e000; +- __m128d_out = __lsx_vffint_d_l(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xff00d5007f00ffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xff00d5007f00ffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256d_result[3]) = 0x7f00d5007f00ffff; +- *((unsigned long*)& __m256d_result[2]) = 0x7f00ffffff00ffff; +- *((unsigned long*)& __m256d_result[1]) = 0x7f00d5007f00ffff; +- *((unsigned long*)& __m256d_result[0]) = 0x7f00ffffff00ffff; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffb080ffffb080; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffb080ffffb080; +- *((unsigned long*)& __m128i_op2[1]) = 0x004fcfcfd01f9f9f; +- *((unsigned long*)& __m128i_op2[0]) = 
0x9f4fcfcfcf800000; +- *((unsigned long*)& __m128i_result[1]) = 0x3504b5fd2dee1f80; +- *((unsigned long*)& __m128i_result[0]) = 0x4676f70fc0000000; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000fe01020b0001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000fe01020b0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0fff0fff00000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0fff0fff00000020; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0fff0fff00000020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0fff0fff00000020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00d5007f00ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00d5007f00ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x43d3e0000013e000; +- *((unsigned long*)& __m128i_op0[0]) = 0x43d3e0000013e000; +- *((unsigned long*)& __m128i_result[1]) = 0x43d3e0000013e000; +- *((unsigned long*)& __m128i_result[0]) = 0x43d3e0000013e000; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffff3fffffff3; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff3fffffff3; +- __m128i_out = __lsx_vmini_w(__m128i_op0,-13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004fcfcfd01f9f9f; +- *((unsigned long*)& __m128i_op0[0]) = 0x9f4fcfcfcf800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x004fcfcfd01f9f9f; +- *((unsigned long*)& __m128i_op1[0]) = 0x9f4fcfcfcf800000; +- *((unsigned long*)& __m128i_result[1]) = 0x004f1fcfd01f9f9f; +- *((unsigned long*)& __m128i_result[0]) = 0x9f4fcfcfcf800000; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xda); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x43d3e0000013e000; +- *((unsigned long*)& __m128i_op2[0]) = 0x43d3e0000013e000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc14eef7fc14ea000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000ea000010fa101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x43d3e0000013e000; +- *((unsigned long*)& __m128i_op0[0]) = 0x43d3e0000013e000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffd3000000130000; +- *((unsigned 
long*)& __m128i_result[0]) = 0xffd3000000130000; +- __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffff3fffffff3; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffff3fffffff3; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffff3fffffff4; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff3fffffff4; +- __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffd3000000130000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffd3000000130000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffd3000000130000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffd3000000130000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffd3000000130000; +- *((unsigned long*)& __m128i_result[0]) = 0xffd3000000130000; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x004f1fcfd01f9f9f; +- *((unsigned long*)& __m128d_op0[0]) = 0x9f4fcfcfcf800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x000000004f804f80; +- *((unsigned long*)& __m128i_result[0]) = 0x000000004f804f80; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffd30000; +- *((int*)& __m128_op0[2]) = 0x00130000; +- *((int*)& __m128_op0[1]) = 0xffd30000; +- *((int*)& __m128_op0[0]) = 0x00130000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00d5007f00ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ffffff00ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00d5007f00ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ffffff00ffff; +- int_op1 = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000055ff01f90ab5; +- *((unsigned long*)& __m256i_op0[2]) = 0xaa95eafffec6e01f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000055ff01f90ab5; +- *((unsigned long*)& __m256i_op0[0]) = 0xaa95eafffec6e01f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfec6e01ffec6e01f; +- *((unsigned long*)& __m256i_result[2]) = 0xfec6e01ffec6e01f; +- *((unsigned long*)& __m256i_result[1]) = 0xfec6e01ffec6e01f; +- *((unsigned long*)& __m256i_result[0]) = 0xfec6e01ffec6e01f; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- long_int_result = 0x0000000000000000; +- long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000003f00390035; +- *((unsigned long*)& __m256i_op0[2]) = 0x8015003f0006001f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000003f00390035; +- *((unsigned long*)& __m256i_op0[0]) = 0x8015003f0006001f; +- *((unsigned long*)& __m256i_result[3]) = 0x000b004a00440040; +- *((unsigned long*)& __m256i_result[2]) = 0x8020004a0011002a; +- *((unsigned long*)& __m256i_result[1]) = 0x000b004a00440040; +- *((unsigned long*)& __m256i_result[0]) = 0x8020004a0011002a; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x80000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x80000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x80000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x80000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfrint_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x0000003f; +- *((int*)& 
__m256_op0[6]) = 0x00390035; +- *((int*)& __m256_op0[5]) = 0x8015003f; +- *((int*)& __m256_op0[4]) = 0x0006001f; +- *((int*)& __m256_op0[3]) = 0x0000003f; +- *((int*)& __m256_op0[2]) = 0x00390035; +- *((int*)& __m256_op0[1]) = 0x8015003f; +- *((int*)& __m256_op0[0]) = 0x0006001f; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000b004a00440040; +- *((unsigned long*)& __m256i_op0[2]) = 0x8020004a0011002a; +- *((unsigned long*)& __m256i_op0[1]) = 0x000b004a00440040; +- *((unsigned long*)& __m256i_op0[0]) = 0x8020004a0011002a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000004a00000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004a0000002a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000004a00000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004a0000002a; +- __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000b004a00440040; +- *((unsigned long*)& __m256d_op0[2]) = 0x8020004a0011002a; +- *((unsigned long*)& __m256d_op0[1]) = 0x000b004a00440040; +- *((unsigned long*)& __m256d_op0[0]) = 0x8020004a0011002a; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0fff0fff00000020; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0fff0fff00000020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x000055ff; +- *((int*)& __m256_op0[6]) = 0x01f90ab5; +- *((int*)& __m256_op0[5]) = 0xaa95eaff; +- *((int*)& __m256_op0[4]) = 0xfec6e01f; +- *((int*)& __m256_op0[3]) = 0x000055ff; +- *((int*)& __m256_op0[2]) = 0x01f90ab5; +- *((int*)& __m256_op0[1]) = 0xaa95eaff; +- *((int*)& __m256_op0[0]) = 0xfec6e01f; 
+- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- long_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000003f00390035; +- *((unsigned long*)& __m256i_op0[2]) = 0x8015003f0006001f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000003f00390035; +- *((unsigned long*)& __m256i_op0[0]) = 0x8015003f0006001f; +- *((unsigned long*)& __m256i_op1[3]) = 0x80000000001529c1; +- *((unsigned long*)& __m256i_op1[2]) = 0x80007073cadc3779; +- *((unsigned long*)& __m256i_op1[1]) = 0x80000000001529c1; +- *((unsigned long*)& __m256i_op1[0]) = 0x80007073cadc3779; +- *((unsigned long*)& __m256i_result[3]) = 0x00008000003f0000; +- *((unsigned long*)& __m256i_result[2]) = 0x00390015003529c1; +- *((unsigned long*)& __m256i_result[1]) = 0x00008000003f0000; +- *((unsigned long*)& __m256i_result[0]) = 0x00390015003529c1; +- __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x004f0080004f0080; +- *((unsigned long*)& __m128i_result[0]) = 0x004f0080004f0080; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x80000000001529c1; +- *((unsigned long*)& __m256i_op0[2]) = 0x80007073cadc3779; +- *((unsigned long*)& __m256i_op0[1]) = 0x80000000001529c1; +- *((unsigned long*)& __m256i_op0[0]) = 0x80007073cadc3779; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x9f009f009f009f00; +- *((unsigned long*)& __m128i_result[0]) = 0x9f009f009f009f00; +- __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffff8001ffff8001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff; +- __m256i_out = __lasx_xvslti_hu(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m256i_op1[2]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fffffffefffe; +- *((unsigned long*)& __m256i_result[1]) = 0xff7fffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fffffffefffe; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x004f0080; +- *((int*)& __m128_op0[2]) = 0x004f0080; +- *((int*)& __m128_op0[1]) = 0x004f0080; +- *((int*)& __m128_op0[0]) = 0x004f0080; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x7fff7fff; +- *((int*)& __m128_op2[2]) = 0x7fff7fff; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7fff7fff; +- *((int*)& __m128_result[2]) = 0x7fff7fff; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff8001ffff8001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x4b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000004a00000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000004a0000002a; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000004a00000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000004a0000002a; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000fffffffefffe; +- *((unsigned long*)& __m256i_op1[1]) = 0xff7fffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000fffffffefffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002500000020; +- *((unsigned long*)& __m256i_result[2]) = 0x00008024ffff8014; +- *((unsigned long*)& __m256i_result[1]) = 0xffc0002500000020; +- *((unsigned long*)& __m256i_result[0]) = 0x00008024ffff8014; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010100000000; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff00000000; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x8000000000000000; +- *((unsigned 
long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_op1[2]) = 0x556caad9aabbaa88; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_op1[0]) = 0x556caad9aabbaa88; +- *((unsigned long*)& __m256i_result[3]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_result[2]) = 0x556caad9aabbaa88; +- *((unsigned long*)& __m256i_result[1]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_result[0]) = 0x556caad9aabbaa88; +- __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_op0[2]) = 0x556caad9aabbaa88; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_op0[0]) = 0x556caad9aabbaa88; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_op1[2]) = 0x556caad9aabbaa88; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_op1[0]) = 0x556caad9aabbaa88; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_result[2]) = 0x556caad9aabbaa88; +- *((unsigned long*)& __m256i_result[1]) = 0x0000004a557baac4; +- *((unsigned long*)& __m256i_result[0]) = 0x556caad9aabbaa88; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7ffe7ffe7ffe7ffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000010100000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000010100000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x00008000003f0000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00390015003529c1; +- *((unsigned long*)& __m256d_op1[1]) = 0x00008000003f0000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00390015003529c1; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_result[2]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_result[1]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_result[0]) = 0x0909090909090909; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 
0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_result[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010003; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000004a; +- *((int*)& __m256_op0[6]) = 0x557baac4; +- 
*((int*)& __m256_op0[5]) = 0x556caad9; +- *((int*)& __m256_op0[4]) = 0xaabbaa88; +- *((int*)& __m256_op0[3]) = 0x0000004a; +- *((int*)& __m256_op0[2]) = 0x557baac4; +- *((int*)& __m256_op0[1]) = 0x556caad9; +- *((int*)& __m256_op0[0]) = 0xaabbaa88; +- *((int*)& __m256_op1[7]) = 0x09090909; +- *((int*)& __m256_op1[6]) = 0x09090909; +- *((int*)& __m256_op1[5]) = 0x09090909; +- *((int*)& __m256_op1[4]) = 0x09090909; +- *((int*)& __m256_op1[3]) = 0x09090909; +- *((int*)& __m256_op1[2]) = 0x09090909; +- *((int*)& __m256_op1[1]) = 0x09090909; +- *((int*)& __m256_op1[0]) = 0x09090909; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op2[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_op2[0]) = 0x000fffefffefffef; +- *((unsigned long*)& __m128i_result[1]) = 0x8009700478185812; +- *((unsigned long*)& __m128i_result[0]) = 0xe009f00ee7fb0800; +- __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_op1[2]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_op1[1]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_op1[0]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ff00ff00; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ffffff00; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ff00ff00; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ffe7ffe7ffe7ffe; +- *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00007ffe00007ffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010002; +- *((unsigned long*)& __m256i_result[1]) = 0x0080000200000003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010002; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00009f0000009f00; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x4f804f80; +- *((int*)& __m128_op0[0]) = 0x4f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00007fff; +- *((int*)& __m128_op1[2]) = 0x00007fff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00007fff; +- *((int*)& __m128_result[2]) = 0x00007fff; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128d_op0[0]) = 0x4f804f804f804f80; +- 
*((unsigned long*)& __m128d_op1[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00010002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00010002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00010002; +- *((unsigned long*)& __m256i_result[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00010002; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000200000003; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00010002; +- *((unsigned long*)& __m256d_op1[1]) = 0x0080000200000003; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000ffff00010002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x75b043c4d17db125; +- *((unsigned long*)& __m128i_op0[0]) = 0xeef8227b596117b1; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x75b043c4d17db125; +- *((unsigned long*)& __m128i_result[0]) = 0xeef8227b4f8017b1; +- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffefffef; +- __m128i_out = __lsx_vmini_w(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x75b043c4d17db125; +- *((unsigned long*)& __m128i_op1[0]) = 0xeef8227b4f8017b1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0; +- __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000de32400; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0; +- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x77); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_op1[0]) = 0x000fffefffefffef; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00000000; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef; +- *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_result[0]) = 0x000fffefffefffef; +- __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000070700000707; +- *((unsigned long*)& __m256i_op1[2]) = 0x000009091b1b1212; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000070700000707; +- *((unsigned long*)& __m256i_op1[0]) = 0x000009091b1b1212; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_op0[0]) = 0x000fffefffefffef; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffefffef; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_op1[0]) = 0x000fffefffefffef; +- *((unsigned long*)& __m128i_result[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_result[0]) = 0x028c026bfff027af; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x001ffff0003ffff0; +- *((unsigned long*)& __m128i_op1[0]) = 0x028c026bfff027af; +- *((unsigned long*)& __m128i_result[1]) = 0x00000003fc03fc00; +- *((unsigned long*)& __m128i_result[0]) = 0xffffc00a3009b000; +- 
__m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x55555555; +- *((int*)& __m256_op0[5]) = 0x00000001; +- *((int*)& __m256_op0[4]) = 0x00000004; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x55555555; +- *((int*)& __m256_op0[1]) = 0x00000001; +- *((int*)& __m256_op0[0]) = 0x00000004; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007fff00000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0040000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007fff00000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000055555555; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000004; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000055555555; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000004; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2aaaaaaa2aaaaaab; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x2aaaaaaa2aaaaaab; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_result[2]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_result[1]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_result[0]) = 0x1111111111111111; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x027c027c000027c0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_op0[2]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_op0[1]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_op0[0]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0888888888888888; +- *((unsigned long*)& __m256i_result[2]) = 0x0888888888888888; +- *((unsigned long*)& __m256i_result[1]) = 0x0888888888888888; +- *((unsigned long*)& __m256i_result[0]) = 0x0888888888888888; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; +- __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x75b043c4d17db125; +- *((unsigned long*)& __m128i_op0[0]) = 0xeef8227b4f8017b1; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x027c027c000027c0; +- *((unsigned long*)& __m128i_result[1]) = 0x75b043c4007db125; +- *((unsigned long*)& __m128i_result[0]) = 0xeef8227b4f8017b1; +- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x004f0080004f0080; +- *((unsigned long*)& __m128i_result[0]) = 0x004f0080004f0080; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001000fbff9; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002ff9afef; +- *((unsigned long*)& __m128i_result[1]) = 0x000000004f804f81; +- *((unsigned long*)& __m128i_result[0]) = 0x000000004f804f80; +- __m128i_out = 
__lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001020202; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001020202; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x027c027c000027c0; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,-6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_op1[2]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_op1[1]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_op1[0]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1111111111111111; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1111111111111111; +- __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xdededededededede; +- *((unsigned long*)& __m256i_result[2]) = 0xdededededededede; +- *((unsigned long*)& __m256i_result[1]) = 0xdededededededede; +- *((unsigned long*)& __m256i_result[0]) = 0xdededededededede; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0x21); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x027c027c000027c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000004f804f81; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000004f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001400000014; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63996399; +- *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363797c63996399; +- *((unsigned long*)& __m128i_op1[0]) = 0x171f0a1f6376441f; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x4f804f81; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x4f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000004f804f81; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000004f804f80; +- *((unsigned long*)& __m128i_result[1]) = 0x000000004fc04f81; +- *((unsigned long*)& __m128i_result[0]) = 0x000000004fc04f80; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xdededededededede; +- *((unsigned long*)& __m256i_op1[2]) = 0xdededededededede; +- *((unsigned long*)& __m256i_op1[1]) = 0xdededededededede; +- *((unsigned long*)& __m256i_op1[0]) = 0xdededededededede; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- 
*((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8888888808888888; +- *((unsigned long*)& __m256i_op0[2]) = 0x0888888888888888; +- *((unsigned long*)& __m256i_op0[1]) = 0x8888888808888888; +- *((unsigned long*)& __m256i_op0[0]) = 0x0888888888888888; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x77777777f7777777; +- *((unsigned long*)& __m256i_result[2]) = 0xf777777777777777; +- *((unsigned long*)& __m256i_result[1]) = 0x77777777f7777777; +- *((unsigned long*)& __m256i_result[0]) = 0xf777777777777777; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xdededede; +- *((int*)& __m256_op0[6]) = 0xdededede; +- *((int*)& __m256_op0[5]) = 0xdededede; +- *((int*)& __m256_op0[4]) = 0xdededede; +- *((int*)& __m256_op0[3]) = 0xdededede; +- *((int*)& __m256_op0[2]) = 0xdededede; +- *((int*)& __m256_op0[1]) = 0xdededede; +- *((int*)& __m256_op0[0]) = 0xdededede; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63996399; +- *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x6363797c63990099; +- *((unsigned long*)& __m128i_result[0]) = 0x171f0a1f6376441f; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x94); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363797c63990099; +- *((unsigned long*)& __m128i_op0[0]) = 0x171f0a1f6376441f; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363797c63990099; +- *((unsigned long*)& __m128i_op1[0]) = 0x171f0a1f6376441f; +- *((unsigned long*)& __m128i_result[1]) = 0x181e180005021811; +- *((unsigned long*)& __m128i_result[0]) = 0x181e180005021811; +- __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x77777777; +- *((int*)& __m256_op0[6]) = 0xf7777777; +- *((int*)& __m256_op0[5]) = 0xf7777777; +- *((int*)& __m256_op0[4]) = 0x77777777; +- *((int*)& __m256_op0[3]) = 0x77777777; +- *((int*)& __m256_op0[2]) = 0xf7777777; +- *((int*)& __m256_op0[1]) = 0xf7777777; +- *((int*)& __m256_op0[0]) = 0x77777777; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x80000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x80000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x77777777f7777777; +- *((unsigned long*)& __m256i_op0[2]) = 0xf777777777777777; +- *((unsigned long*)& __m256i_op0[1]) = 0x77777777f7777777; +- *((unsigned long*)& __m256i_op0[0]) = 0xf777777777777777; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000004fc04f81; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000004fc04f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000004fc04f81; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000004fc04f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000004fc04f81; +- *((unsigned long*)& __m128i_result[0]) = 0x000000004fc04f80; +- __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; +- *((unsigned long*)& __m128i_result[1]) = 0x9292929292929292; +- *((unsigned long*)& __m128i_result[0]) = 0x8090808280909002; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0x6d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000010; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000010; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000002b902b3e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000002b902b3e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000002a102a3a; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000002a102a3a; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x3a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_result[3]) = 0x1000100054445443; +- *((unsigned long*)& __m256i_result[2]) = 
0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_result[1]) = 0x1000100054445443; +- *((unsigned long*)& __m256i_result[0]) = 0x7bbbbbbbf7777778; +- __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvexth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000004fc04f81; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000004fc04f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f7f; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3e035e51522f0799; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3e035e51522f0799; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000002bfd9461; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000004fc04f81; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000004fc04f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001c00ffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m128i_result[1]) = 0x000001000f00fe00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000017fff00fe7f; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 
0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff10; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007bbbbbbb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007bbbbbbb; +- __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x8d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007bbbbbbb; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007bbbbbbb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000073333333; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000073333333; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000001000f00fe00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000017fff00fe7f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m256i_result[2]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m256i_result[1]) = 0xf8f8f8f8f8f8f8f8; +- 
*((unsigned long*)& __m256i_result[0]) = 0xf8f8f8f8f8f8f8f8; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001c00ffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010201808040; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010280808040; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000073333333; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000073333333; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000073333333; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000073333333; +- __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op0[2]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op0[0]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461; +- *((unsigned long*)& __m128i_op2[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000001000f00fe00; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000017fff00fe7f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000f00; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff00; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000100; +- *((int*)& __m128_op0[2]) = 0x0f00fe00; +- *((int*)& __m128_op0[1]) = 0x0000017f; +- *((int*)& __m128_op0[0]) = 0xff00fe7f; +- *((unsigned long*)& __m128d_result[1]) = 0x3727f00000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xc7e01fcfe0000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xf7fdd5ffebe1c9e3; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7fdd5ffebe1c9e3; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000002467db99; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000003e143852; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000002467db99; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000003e143852; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffdb982466; +- *((unsigned long*)& __m256i_result[2]) = 0xf7fdd5ffadcd9191; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffdb982466; +- *((unsigned long*)& __m256i_result[0]) = 0xf7fdd5ffadcd9191; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ffa7f8ff81; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000003f0080ffc0; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007fff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000a7f87fffff81; +- __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461; +- *((unsigned long*)& __m128i_result[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00003ff000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000002467db99; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003e143852; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000002467db99; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003e143852; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_result[3]) = 0x0000246700003e14; +- *((unsigned long*)& __m256i_result[2]) = 0x000044447bbbf777; +- *((unsigned long*)& __m256i_result[1]) = 0x0000246700003e14; +- *((unsigned long*)& __m256i_result[0]) = 0x000044447bbbf777; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000073333333; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000073333333; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x56); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461; +- *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000007fff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ffa7f8ff81; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000003f0080ffc0; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000007fff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000a7f87fffff81; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffd400000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000004000000040; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op1[2]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000044444443; +- *((unsigned long*)& __m256i_op1[0]) = 0x7bbbbbbbf7777778; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000004444; +- *((unsigned long*)& __m256i_result[2]) = 0x00007bbb0000f777; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000004444; +- *((unsigned long*)& __m256i_result[0]) = 0x00007bbb0000f777; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004444; +- *((unsigned long*)& __m256i_op1[2]) = 0x00007bbb0000f777; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004444; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007bbb0000f777; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000002222; +- *((unsigned long*)& __m256i_result[2]) = 0x00003dde00007bbc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000002222; +- *((unsigned long*)& __m256i_result[0]) = 0x00003dde00007bbc; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000f00; +- 
*((unsigned long*)& __m128i_op2[0]) = 0x00000000ffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000002bfd9461; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3727f00000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc7e01fcfe0000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3727112c00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x39201f7120000040; +- *((unsigned long*)& __m128i_op2[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xe5b9012c00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xc7e01fcfe0000000; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000022222221; +- *((unsigned long*)& __m256i_op0[2]) = 0x3dddddddfbbb3bbc; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000022222221; +- *((unsigned long*)& __m256i_op0[0]) = 0x3dddddddfbbb3bbc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 
0x00007fff; +- *((int*)& __m128_op0[2]) = 0x00007fff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x2bfd9461; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x2bfd9461; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000f00; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0x1ff800000000477f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000015fec9b0; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00070000; +- *((int*)& __m128_op0[2]) = 0x00040000; +- *((int*)& __m128_op0[1]) = 0x00030000; +- *((int*)& __m128_op0[0]) = 0x00010000; +- *((int*)& __m128_op1[3]) = 0x00070000; +- *((int*)& __m128_op1[2]) = 0x00040000; +- *((int*)& __m128_op1[1]) = 0x00030000; +- *((int*)& __m128_op1[0]) = 0x00010000; +- *((int*)& __m128_result[3]) = 0x3f800000; +- *((int*)& __m128_result[2]) = 0x3f800000; +- *((int*)& __m128_result[1]) = 0x3f800000; +- *((int*)& __m128_result[0]) = 0x3f800000; +- __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461; +- *((unsigned long*)& __m128i_result[1]) = 0x0000400400004004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000015ff4a31; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128d_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128d_op1[1]) = 0x3f8000003f800000; +- *((unsigned 
long*)& __m128d_op1[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000f00; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004444; +- *((unsigned long*)& __m256i_op1[2]) = 0x00007bbb0000f777; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004444; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007bbb0000f777; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000002222; +- *((unsigned long*)& __m256i_result[2]) = 0x00003ddd80007bbb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000002222; +- *((unsigned long*)& __m256i_result[0]) = 0x00003ddd80007bbb; +- __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800001; +- *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800001; +- __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0007000000040000; +- *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002bfd9461; +- *((unsigned long*)& __m128i_op2[1]) = 0x3f8000003f800001; +- *((unsigned long*)& __m128i_op2[0]) = 0x3f8000003f800001; +- *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800000; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000780000007800; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0007000000040000; +- *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000002222; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003ddd80007bbb; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000002222; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003ddd80007bbb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001700170017; +- __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000002222; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003ddd80007bbb; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000002222; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003ddd80007bbb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x31); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0003000000010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000000010000; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000017; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800001; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800001; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000000010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000000010001; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00030000; +- *((int*)& __m128_op0[2]) = 0x00010000; +- *((int*)& __m128_op0[1]) = 0x00020000; +- *((int*)& __m128_op0[0]) = 0x00010000; +- *((int*)& __m128_op1[3]) = 0x3f800000; +- *((int*)& __m128_op1[2]) = 0x3f800000; +- *((int*)& __m128_op1[1]) = 0x3f800000; +- *((int*)& __m128_op1[0]) = 0x3f800000; +- *((int*)& __m128_op2[3]) = 0x00030000; +- *((int*)& __m128_op2[2]) = 0x00010000; +- *((int*)& __m128_op2[1]) = 0x00020000; +- *((int*)& __m128_op2[0]) = 0x00010000; +- *((int*)& __m128_result[3]) = 0x80060000; +- *((int*)& __m128_result[2]) = 0x80020000; +- *((int*)& __m128_result[1]) = 0x80040000; +- *((int*)& __m128_result[0]) = 0x80020000; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000170017; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000170017; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000170017; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000170017; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000170017; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000170017; +- __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0003000000010000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000000010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x000000ff; +- *((int*)& __m128_op0[2]) = 0x808000ff; +- *((int*)& __m128_op0[1]) = 0x000000ff; +- *((int*)& __m128_op0[0]) = 0x808000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x3); +- *((unsigned long*)& __m128i_op0[1]) = 0x8006000080020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8004000080020000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffff8fffffff8; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff8fffffff8; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001700170017; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8006000080020000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8004000080020000; +- *((unsigned long*)& __m128i_result[1]) = 0x8006000080020000; +- *((unsigned long*)& __m128i_result[0]) = 
0x8004000080020000; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000000010001; +- *((unsigned long*)& __m128i_result[1]) = 0x00003f8000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00003f8000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000; +- unsigned_long_int_result = 0x3f8000003f800000; +- unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1fc000001fc00000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1fc000001fc00000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000000010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128d_result[1]) = 0x8000ffff00000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000ffff00000000; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x28); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001700170017; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vclz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000ffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000ffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000fefe00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fefe00000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00003f8000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00003f8000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000ffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000080003f80ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x28); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00003f8000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00003f8000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000d; +- __m256i_out = __lasx_xvaddi_du(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3ff0010000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3ff0010000000000; +- __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00003f8000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00003f8000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x003f800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x003f800000000000; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xd2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000080003f80ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op2[1]) = 0x3ff0010000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x3ff0010000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000080003f80ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00008000; +- *((int*)& __m128_op1[2]) = 0x3f80ffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff0000ffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xff0000ffffffffff; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000080003f80ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000; +- *((unsigned long*)& __m128i_result[0]) = 0x000001fc00000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0010000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ff0010000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3fffff0000000000; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x000001fc00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000000020000; +- *((unsigned long*)& __m128i_result[0]) = 0x000001fc00000000; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; +- __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100007f01; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00020000; +- *((int*)& __m128_op0[2]) = 0x00020000; +- *((int*)& __m128_op0[1]) = 0x000001fc; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000100007f01; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000001; +- *((int*)& __m128_op0[2]) = 0x00007f01; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000001fc00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000000020000; +- __m128i_out = __lsx_vexth_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000001fc00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010100000000; +- __m128i_out = __lsx_vsat_bu(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- long_op0 = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x3f8000003f800000; +- __m128i_out = 
__lsx_vreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x3c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x000000fe; +- *((int*)& __m128_op0[2]) = 0x808000ff; +- *((int*)& __m128_op0[1]) = 0x000000fe; +- *((int*)& __m128_op0[0]) = 0x808000fe; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000003fffff00; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000003fffff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x80000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x80000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x80000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x80000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x0000ffff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x0000ffff; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000001; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& 
__m256_op2[4]) = 0x00000001; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000001; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000001; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000001; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000001; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000001; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000001; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffff000000ff00; +- __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f7fff003f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7fff003f800000; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x3f80000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3f80000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3f80000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f80000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x1fc0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1fc07f8000007f80; +- __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00; +- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0f0000000f000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc1000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffcc000b000b000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000b000b010a000b; +- __m128i_out = __lsx_vaddi_hu(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_xvclo_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffcc000b000b000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000b000b010a000b; +- *((unsigned long*)& __m128i_result[1]) = 0x7f7f000b000b000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000b000b010a000b; +- __m128i_out = 
__lsx_vsat_bu(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3fffff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3ffffeffffffffe5; +- *((unsigned long*)& __m128i_result[0]) = 0x3ffffeffffffffe5; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc1000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffc1000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff000000007fff; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03c0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x03c0038000000380; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ffff000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x03c0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x03c0038000000380; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffff000000ff00; +- __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000001; +- __m256i_out = __lasx_xvreplve0_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03c0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x03c0038000000380; +- *((unsigned long*)& __m128i_result[1]) = 0x000003c000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,-2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000010a000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ffff0000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ffff000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000010a000b; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f7f000b000b000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000b000b010a000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0101080408040804; +- *((unsigned long*)& __m128i_result[0]) = 0x0804080407040804; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; +- __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_result[2]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_result[1]) = 0x0909090909090909; +- *((unsigned long*)& __m256i_result[0]) = 0x0909090909090909; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x66); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff000000007fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101080408040804; +- *((unsigned long*)& __m128i_op0[0]) = 0x0804080407040804; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000010a000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0101080408040804; +- *((unsigned long*)& __m128i_result[0]) = 0x000100810080e081; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000010a000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800; +- __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000003fc0; +- __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x22); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000800000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x0000ffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000ffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x0000ffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe50000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffe020; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fc00000010a000b; +- *((unsigned long*)& __m128i_result[1]) = 0x00001b0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x4d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0101080408040804; +- *((unsigned long*)& __m128d_op0[0]) = 0x0804080407040804; +- *((unsigned long*)& __m128d_op1[1]) = 0x0101080408040804; +- *((unsigned long*)& __m128d_op1[0]) = 0x0804080407040804; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; +- *((unsigned long*)& __m128i_op1[1]) = 0x0101080408040804; +- *((unsigned long*)& __m128i_op1[0]) = 0x0804080407040804; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800; +- __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; +- 
*((unsigned long*)& __m256i_result[3]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_result[2]) = 0x00c200c200c200bb; +- *((unsigned long*)& __m256i_result[1]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_result[0]) = 0x00c200c200c200bb; +- __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100089bde; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x80044def00000001; +- __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x80044def00000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x00007f8449a19084; +- *((unsigned long*)& __m128i_result[0]) = 0x49a210000000ff00; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000104000800; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_op0[2]) = 0x00c200c200c200bb; +- *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc2c2ffffc2c2; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffc2c2ffffc2c2; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffc2c2ffffc2c2; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffc2c2ffffc2c2; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x003100310031002f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x003100310031002f; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_op0[2]) = 
0x00c200c200c200bb; +- *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; +- *((unsigned long*)& __m256i_result[3]) = 0x007fffff007fffff; +- *((unsigned long*)& __m256i_result[2]) = 0x007fffff007fffff; +- *((unsigned long*)& __m256i_result[1]) = 0x007fffff007fffff; +- *((unsigned long*)& __m256i_result[0]) = 0x007fffff007fffff; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_op0[2]) = 0x00c200c200c200bb; +- *((unsigned long*)& __m256i_op0[1]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_op0[0]) = 0x00c200c200c200bb; +- *((unsigned long*)& __m256i_op1[3]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op1[2]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op1[1]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op1[0]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00070000; +- *((int*)& __m128_op0[2]) = 0x00050000; +- *((int*)& __m128_op0[1]) = 0x00030000; +- *((int*)& __m128_op0[0]) = 0x00010000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xff81007c; +- *((int*)& __m128_op1[1]) = 0xffb7005f; +- *((int*)& __m128_op1[0]) = 0x0070007c; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000805; +- *((unsigned long*)& __m128i_op0[0]) = 0x978d95ac768d8784; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000897957687; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000408; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; +- *((unsigned long*)& __m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; +- *((unsigned long*)& __m128i_result[0]) = 0xf7f7f7f7f7f7fbff; +- __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x007fffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x007fffff007fffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x007fffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x007fffff007fffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_op1[2]) = 0x00c200c200c200bb; +- *((unsigned long*)& __m256i_op1[1]) = 0x00c200c200c200c2; +- *((unsigned long*)& __m256i_op1[0]) = 0x00c200c200c200bb; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffbdff3cffbdff44; +- 
__m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf7f7f7ff8e8c6d7e; +- *((unsigned long*)& __m128i_op0[0]) = 0xf7f7f7f7f7f7fbff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; +- *((unsigned long*)& __m128i_result[0]) = 0xf7f7f7f7f7f7fbff; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000897957687; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000408; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000100; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000001dc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000001dc; +- __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000008; +- *((int*)& __m128_op0[2]) = 0x97957687; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000408; +- *((int*)& __m128_op1[3]) = 0x00000008; +- *((int*)& __m128_op1[2]) = 0x97957687; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000408; +- *((int*)& __m128_op2[3]) = 0x00010001; +- *((int*)& __m128_op2[2]) = 0x00010001; +- *((int*)& __m128_op2[1]) = 0x00010001; +- *((int*)& __m128_op2[0]) = 0x04000800; +- *((int*)& __m128_result[3]) = 0x80010001; +- *((int*)& __m128_result[2]) = 0x80010001; +- *((int*)& __m128_result[1]) = 0x80010001; +- *((int*)& __m128_result[0]) = 0x84000800; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; +- *((unsigned long*)& __m128i_op1[1]) = 0x8001000180010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x8001000184000800; +- *((unsigned long*)& __m128i_result[1]) = 0xffff80007e028401; +- *((unsigned long*)& __m128i_result[0]) = 0x9a10144000400000; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000007ae567a3e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000700ff00000000; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000104000800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000040004000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0010002000000000; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff81007c; +- *((unsigned long*)& __m128i_op0[0]) = 0xffb7005f0070007c; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000104000800; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000007c; +- *((unsigned long*)& __m128i_result[0]) = 0x0000005f0003e000; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff7effffff46; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff7effffff46; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x42); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000000001dc; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000000001dc; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x00000000000001dc; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x00000000000001dc; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x80000000000001dc; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x80000000000001dc; +- __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001dc; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001dc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffbdff3cffbdff44; +- *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0xfe8bfe0efe8bfe12; +- *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0xfe8bfe0efe8bfe12; +- __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned 
long*)& __m256i_op0[3]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op0[2]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op0[1]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op0[0]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op1[3]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op1[2]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op1[1]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op1[0]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_result[2]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_result[0]) = 0xc2c2c2c2c2c2c2c2; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; +- *((unsigned long*)& __m256i_result[3]) = 0xfffe000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffe000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffe000000000000; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x31); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffff7e; +- *((int*)& __m256_op0[4]) = 0xffffff46; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffff7e; +- *((int*)& __m256_op0[0]) = 0xffffff46; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001dc; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001dc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff24; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff24; +- __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfe8bfe0efe8bfe12; +- *((unsigned long*)& __m256i_op1[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfe8bfe0efe8bfe12; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff81007c; +- *((unsigned long*)& __m128i_op0[0]) = 0xffb7005f0070007c; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff80007e028401; +- *((unsigned long*)& __m128i_op1[0]) = 0x9a10144000400000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001ffff00010; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x5b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80010001b57fc565; +- *((unsigned long*)& __m128i_op0[0]) = 0x8001000184000be0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x80010001b57fc565; +- *((unsigned long*)& __m128i_result[0]) = 0x8001000184000be0; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000700ff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000040004000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0010002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000700ff00000000; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000005f0003e000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000897957687; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000408; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff24; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff24; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; +- __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; +- *((unsigned long*)& __m128i_op1[1]) = 0x80010001b57fc565; +- *((unsigned long*)& __m128i_op1[0]) = 0x8001000184000be0; +- *((unsigned long*)& __m128i_result[1]) = 0x000000080001fffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000040004000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000ed0e0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004080; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_result[0]) = 0x1111111111111111; +- __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff00ffff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0xfcfcfc00fcfc00fc; +- *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcfcfcfcfc00; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000897957687; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000408; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff0007e215b122; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ffeffff7bfff828; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff80010001; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff80010001; +- __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfefefefe; +- *((int*)& __m256_op0[6]) = 0xfefefefe; +- *((int*)& __m256_op0[5]) = 0xfe8bfe0e; +- *((int*)& __m256_op0[4]) = 0xfe8bfe12; +- *((int*)& __m256_op0[3]) = 0xfefefefe; +- *((int*)& __m256_op0[2]) = 0xfefefefe; +- *((int*)& __m256_op0[1]) = 0xfe8bfe0e; +- *((int*)& __m256_op0[0]) = 0xfe8bfe12; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80010009816ac5de; +- *((unsigned long*)& __m128i_op0[0]) = 0x8001000184000bd8; +- *((unsigned long*)& __m128i_result[1]) = 0x0bd80bd80bd80bd8; +- *((unsigned long*)& __m128i_result[0]) = 0x0bd80bd80bd80bd8; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) 
= 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000007; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ed0e0; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000004080; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000ed0e0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004080; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x80000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x80000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x80000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x80000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x80000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x80000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x80000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x80000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op0[2]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op0[1]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op0[0]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_result[3]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_result[2]) = 0x6161616100000018; +- *((unsigned long*)& __m256i_result[1]) = 0x6161616161616161; +- *((unsigned long*)& __m256i_result[0]) = 0x6161616100000018; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x7fff0007; +- *((int*)& __m128_op0[2]) = 0xe215b122; +- *((int*)& __m128_op0[1]) = 0x7ffeffff; +- *((int*)& __m128_op0[0]) = 0x7bfff828; +- *((int*)& __m128_op1[3]) = 0x80010009; +- *((int*)& __m128_op1[2]) = 0x816ac5de; +- *((int*)& __m128_op1[1]) = 0x80010001; +- *((int*)& __m128_op1[0]) = 0x84000bd8; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0bd80bd80bd80bd8; +- *((unsigned long*)& __m128i_op0[0]) = 0x0bd80bd80bd80bd8; +- unsigned_long_int_result = 0x0bd80bd80bd80bd8; +- unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffa; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x59); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80000000b57ec564; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000083ff0be0; +- *((unsigned long*)& __m128i_result[1]) = 0x0014000000140014; +- *((unsigned long*)& __m128i_result[0]) = 0x0014000000140014; +- __m128i_out = __lsx_vmini_hu(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x80000000b57ec564; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000083ff0be0; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001b57ec563; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000183ff0bdf; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000009; +- *((unsigned long*)& __m128i_op0[0]) = 0x5b35342c979955da; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m128i_result[0]) = 0x5b35342c970455da; +- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x0); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000003397dd140; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000004bd7cdd20; +- *((unsigned long*)& __m128i_op1[1]) = 0x0016ffb00016ffb0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0016ffb00016ffb0; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000004a294b; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000006d04bc; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffffffa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x2a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000004a294b; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000006d04bc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0016001600160016; +- *((unsigned long*)& __m256i_result[2]) = 0x0016001600160016; +- *((unsigned long*)& __m256i_result[1]) = 0x0016001600160016; +- *((unsigned long*)& __m256i_result[0]) = 0x0016001600160016; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x7fff0007e215b122; +- *((unsigned long*)& __m128d_op1[0]) = 0x7ffeffff7bfff828; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0bd80bd80bdfffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0bd80bd80bd80000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 
0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0bd80bd80bdfffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0bd80bd80bd80000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0bef0b880bd80bd8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0bd80bd80bdfffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0bd80bd80bd80000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000017b017b01; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x5b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0016001600160016; +- *((unsigned long*)& __m256i_op0[2]) = 0x0016001600160016; +- *((unsigned long*)& __m256i_op0[1]) = 0x0016001600160016; +- *((unsigned long*)& __m256i_op0[0]) = 0x0016001600160016; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,-12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x0bd80bd8; +- *((int*)& __m128_op1[2]) = 0x0bdfffff; +- *((int*)& __m128_op1[1]) = 0x0bd80bd8; +- *((int*)& __m128_op1[0]) = 0x0bd80000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80010001; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff80010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0bd80bd80bdfffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0bd80bd80bd80000; +- *((unsigned long*)& __m128i_result[1]) = 0x1ffffffff8001000; +- *((unsigned long*)& __m128i_result[0]) = 0xf0bd80bd80bd8000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x1ffffffff8001000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0xf0bd80bd80bd8000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff7ffffffefffe; +- *((unsigned long*)& __m128i_result[0]) = 0xdfffdfffdffffffe; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xd9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffe0001fffe0001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffe0001fffe0001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1ffffffff8001000; +- *((unsigned long*)& __m128i_op1[0]) = 0xf0bd80bd80bd8000; +- *((unsigned long*)& __m128i_result[1]) = 0x1ffffffff8001000; +- *((unsigned long*)& __m128i_result[0]) = 0xf0bd80bd80bd8000; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfffe0001; +- *((int*)& __m128_op0[2]) = 0xfffe0001; +- *((int*)& __m128_op0[1]) = 0xfffe0001; +- *((int*)& __m128_op0[0]) = 0xfffe0001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xfffe0001; +- *((int*)& __m128_result[2]) = 0xfffe0001; +- *((int*)& __m128_result[1]) = 0xfffe0001; +- *((int*)& __m128_result[0]) = 0xfffe0001; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffdfffffffdff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffdfffffffdff; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x37); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffe0001fffe0001; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffe0001fffe0001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xfffe0001fffe0001; +- *((unsigned long*)& __m128i_op2[0]) = 0xfffe0001fffe0001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffefffffffe; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000001c; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000001c; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000001c; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000001c; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0bd80bd80bdfffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0bd80bd80bd80000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0bd80bd80bd80000; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xf9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1ffffffff8001000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf0bd80bd80bd8000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffe0001fffe0001; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffe0001fffe0001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffefffffffe; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0x8); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m128i_result[0]) = 0x3d3d3d3d3d3d3d3d; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x3d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0010000000000000; +- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m128i_op0[0]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x3d3d3d3d; +- *((int*)& __m128_op0[2]) = 0x3d3d3d3d; +- *((int*)& __m128_op0[1]) = 0x3d3d3d3d; +- *((int*)& __m128_op0[0]) = 0x3d3d3d3d; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00100000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x0000bd3d; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000c00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000bd3d00000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000c00; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00bd003d; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000bd3d00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000bd3d00000000; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000bd3d00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000bd3d00000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000bd3d00000000; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; +- __m128i_out = __lsx_vbitseti_h(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000020202020; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x3a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202020; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000bd003d; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[1]) = 0x0013001300130013; +- *((unsigned long*)& __m128i_result[0]) = 0x0013001300130013; +- __m128i_out = __lsx_vmini_hu(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00130013; +- *((int*)& __m128_op0[2]) = 0x00130013; +- *((int*)& __m128_op0[1]) = 0x00130013; +- *((int*)& __m128_op0[0]) = 0x00130013; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000bd3d00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffff0000000ad3d; +- *((unsigned long*)& __m128i_op1[0]) = 
0xfffff000fffff000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0000; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffff0000000ad3d; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff000fffff000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xefffdffff0009d3d; +- __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000bd3d00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000bd3d00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000bd3d00000000; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000bd3d; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000c0000bd49; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000c7fff000c; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffff0000000ad3d; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff000fffff000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffff00010001000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff000fffff000; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out 
= __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000bd3d; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xefffdffff0009d3d; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000bd3d; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0000; +- __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_result[2]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_result[1]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_result[0]) = 0x0020002000400040; +- __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000bd3d; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000bd30; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000d7fff0000; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000007a6d; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000dfefe0000; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000005555; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000005555; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; +- *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000c7fff000c; +- *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; +- __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; +- __m256i_out = __lasx_xvclz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmuh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_b(__m128i_op0,0xb); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[3]) = 0x0008000800080008; +- *((unsigned long*)& __m256i_result[2]) = 0x0008000800080008; +- *((unsigned long*)& __m256i_result[1]) = 0x0008000800080008; +- *((unsigned long*)& __m256i_result[0]) = 0x0008000800080008; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0020002000400040; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000200020; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000200020; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000200020; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000200020; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x000000000000bd3d; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000c7fff000c; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000006ffef000; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000005; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000005; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00060000; +- *((int*)& __m256_op0[6]) = 0x00040000; +- *((int*)& __m256_op0[5]) = 0x00020000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00060000; +- *((int*)& __m256_op0[2]) = 0x00040000; +- *((int*)& __m256_op0[1]) = 0x00020000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00060000; +- *((int*)& __m256_op1[6]) = 0x00040000; +- *((int*)& __m256_op1[5]) = 0x00020000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00060000; +- *((int*)& __m256_op1[2]) = 0x00040000; +- *((int*)& __m256_op1[1]) = 0x00020000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0x0000000c; +- *((int*)& __m128_op0[2]) = 0x7fff000c; +- *((int*)& __m128_op0[1]) = 0x10001000; +- *((int*)& __m128_op0[0]) = 0x10001000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; +- __m128i_out = __lsx_vfclass_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00050008000e0010; +- *((unsigned long*)& __m256i_op1[2]) = 0x0007000800100010; +- *((unsigned long*)& __m256i_op1[1]) = 0x00050008000e0010; +- *((unsigned long*)& __m256i_op1[0]) = 0x0007000800100010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffff000f0008d3c; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff0016fff8d3d; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffff000f0008d3c; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffff0016fff8d3d; +- *((unsigned long*)& __m128i_result[1]) = 0xe10000004deb2610; +- *((unsigned long*)& __m128i_result[0]) = 0xe101e0014dec4089; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xe1000000; +- *((int*)& __m128_op0[2]) = 0x4deb2610; +- *((int*)& __m128_op0[1]) = 0xe101e001; +- *((int*)& __m128_op0[0]) = 0x4dec4089; +- *((unsigned long*)& __m128i_result[1]) = 0x800000001d64c200; +- *((unsigned long*)& __m128i_result[0]) = 0x800000001d881120; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0006000000020000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0006000000020000; +- __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000008000000080; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000008000000080; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000c7fff000c; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op2[1]) = 0xfffff000f0008d3c; +- *((unsigned long*)& __m128i_op2[0]) = 0xfffff0016fff8d3d; +- *((unsigned long*)& __m128i_result[1]) = 0x00000100f8100002; +- *((unsigned long*)& __m128i_result[0]) = 0xfff0ff8006f0f950; +- __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000008000000080; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000008000000080; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000008000000080; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000008000000080; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x95); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000080; +- *((int*)& __m256_op0[6]) = 0x00000080; +- *((int*)& __m256_op0[5]) = 0x00000080; +- *((int*)& __m256_op0[4]) = 0x00000080; +- *((int*)& __m256_op0[3]) = 0x00000080; +- *((int*)& __m256_op0[2]) = 0x00000080; +- *((int*)& __m256_op0[1]) = 0x00000080; +- *((int*)& __m256_op0[0]) = 0x00000080; +- *((int*)& __m256_op1[7]) = 0x00000001; +- *((int*)& __m256_op1[6]) = 0x00000001; +- *((int*)& __m256_op1[5]) = 0x00000001; +- *((int*)& __m256_op1[4]) = 0x00000001; +- *((int*)& __m256_op1[3]) = 0x00000001; +- *((int*)& __m256_op1[2]) = 0x00000001; +- *((int*)& __m256_op1[1]) = 0x00000001; +- *((int*)& __m256_op1[0]) = 0x00000001; +- *((int*)& __m256_result[7]) = 0x00000001; +- *((int*)& __m256_result[6]) = 0x00000001; +- *((int*)& __m256_result[5]) = 0x00000001; +- *((int*)& __m256_result[4]) = 0x00000001; +- *((int*)& __m256_result[3]) = 0x00000001; +- *((int*)& __m256_result[2]) = 0x00000001; +- *((int*)& __m256_result[1]) = 0x00000001; +- *((int*)& __m256_result[0]) = 0x00000001; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000f0009d3c; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000016fff9d3d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000bd0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000007f0; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000916c; +- *((unsigned long*)& __m128i_result[0]) = 0x000000010000954d; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000f0009d3c; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000016fff9d3d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000c000000060003; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000c0000bd49; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000c7fff000c; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000100c6ffef00d; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000f0009d3c; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000016fff9d3d; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffff000f0008d3c; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffff0016fff8d3d; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff000000003c3c; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff0101ffff3d3d; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff00010000fff; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x800000001d64c200; +- *((unsigned long*)& __m128d_op0[0]) = 0x800000001d881120; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000000f0009d3c; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000016fff9dff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; +- __m256d_out = __lasx_xvflogb_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_d(__m256i_op0,14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_result[0]) = 0xffffff00ffffff01; +- __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf0000000f0000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xf0000000f0000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff07effffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100110002; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff00010000fff; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,-4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000200; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000200; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000200; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000200; +- *((int*)& __m256_op2[7]) = 0xffffffa0; +- *((int*)& __m256_op2[6]) = 0x00000001; +- *((int*)& __m256_op2[5]) = 0xffffffe0; +- *((int*)& __m256_op2[4]) = 0x00000001; +- *((int*)& __m256_op2[3]) = 0xffffffa0; +- *((int*)& __m256_op2[2]) = 0x00000001; +- *((int*)& __m256_op2[1]) = 0xffffffe0; +- *((int*)& __m256_op2[0]) = 0x00000001; +- *((int*)& __m256_result[7]) = 0xffffffa0; +- *((int*)& __m256_result[6]) = 0x80000001; +- *((int*)& __m256_result[5]) = 0xffffffe0; +- *((int*)& __m256_result[4]) = 0x80000001; +- *((int*)& __m256_result[3]) = 0xffffffa0; +- *((int*)& __m256_result[2]) = 0x80000001; +- *((int*)& __m256_result[1]) = 0xffffffe0; +- *((int*)& __m256_result[0]) = 0x80000001; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- 
ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffa080000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffe080000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffa080000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffe080000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000010000f00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000010000f01; +- __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000100f8100002; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff0ff8006f0f950; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x006f0efe258ca851; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffff00010000fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffff00; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00002f0a; +- 
*((unsigned long*)& __m128i_op0[0]) = 0x0000958aefff895e; +- *((unsigned long*)& __m128i_result[1]) = 0xfafafafafafafafa; +- *((unsigned long*)& __m128i_result[0]) = 0xfafa958aeffa89fa; +- __m128i_out = __lsx_vmini_b(__m128i_op0,-6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x24); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ffff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000100c6ffef10c; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffff01; +- *((unsigned long*)& __m128i_result[0]) = 0xffffeff400000df4; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000; +- 
*((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000100c6ffef10c; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff70; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff9001a47e; +- __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000067400002685; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000100c6ffef10c; +- unsigned_int_result = 0x00000000000000ff; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff01; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffeff400000df4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ff91fffffff5; +- *((unsigned long*)& __m128i_result[0]) = 0xffff00650001ffb0; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ff91fffffff5; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff00650001ffb0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000067400002685; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ff91fffffff5; +- *((unsigned long*)& __m128i_result[0]) = 0xffff00650000ff85; +- __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x24); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff01; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffeff400000df4; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff03fe; +- *((unsigned long*)& 
__m128i_result[0]) = 0xffffe9df0000e81b; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x006f0efe258ca851; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff90ffffe0f5; +- *((unsigned long*)& __m128i_result[0]) = 0x006e7973258d0ef4; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00060000; +- *((int*)& __m256_op0[6]) = 0x00040000; +- *((int*)& __m256_op0[5]) = 0x00020000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00060000; +- *((int*)& __m256_op0[2]) = 0x00040000; +- *((int*)& __m256_op0[1]) = 0x00020000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_result[0]) = 0x0000c000ffffc000; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; +- __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ff91fffffff5; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff00650001ffb0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffff0001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffff0001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000c000ffffc000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000c000ffffc000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128d_result[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128d_result[0]) = 0x0000958affff995d; +- __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000c000ffffc000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000006f00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000c00000000000; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffefffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010401; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010401; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010401; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010401; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fdfc0000fd03; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000202020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000404040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000202020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000404040; +- __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x68); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000404040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000202020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000404040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000202020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000404040; +- __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404240; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404240; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404240; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000040404040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404240; +- *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f7f00007f7f; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffefffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000095896a760000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x006f0efe258ca851; +- *((unsigned long*)& __m128i_op2[1]) = 0xffff7fc8ffff8000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffff200000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000015516a768038; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff9ed2e1c000; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x40404040; +- *((int*)& __m256_op0[6]) = 0x40404040; +- *((int*)& __m256_op0[5]) = 0x40404040; +- *((int*)& __m256_op0[4]) = 0x40404040; +- *((int*)& __m256_op0[3]) = 0x40404040; +- *((int*)& __m256_op0[2]) = 0x40404040; +- *((int*)& __m256_op0[1]) = 0x40404040; +- *((int*)& __m256_op0[0]) = 0x40404040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d; +- __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000015516a768038; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffff9ed2e1c000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_result[0]) = 0x0000958affff995d; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000c00000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000bfffffffe0f6; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000bfffffffe0f6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_result[0]) = 
0x00000000ffff7a53; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000007f0000007f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000007f0000007f; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff80ff01ff80; +- *((unsigned long*)& __m256i_result[0]) = 0xff01ff800000007e; +- __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x80000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000007f0000007f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000007f0000007f; +- *((unsigned long*)& __m256i_op0[1]) = 0xff01ff80ff01ff80; +- *((unsigned long*)& __m256i_op0[0]) = 0xff01ff800000007e; +- *((unsigned long*)& __m256i_result[3]) = 0x003f8000003f8000; +- *((unsigned long*)& __m256i_result[2]) = 0x003f8000003f8000; +- *((unsigned long*)& __m256i_result[1]) = 0xffc07f80ffc07f80; +- *((unsigned long*)& __m256i_result[0]) = 0xffc07f80003f0000; +- __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x36de0000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x3be14000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); 
+- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000030000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000030000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x24); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000036de0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000003be14000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000007e8a60; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000001edde; +- __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000007e8a60; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000001edde; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vclo_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000036de0000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000003be14000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000030000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000030000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000018002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000018002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001f0a; +- 
*((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff7a53; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00018002; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000002; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00018002; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000002; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00030000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00030000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000007e8a60; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000001edde; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- *((unsigned long*)& __m128i_result[1]) = 0x000000de00003e14; +- *((unsigned long*)& __m128i_result[0]) = 0x00012b15ffff32ba; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff7a53; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_q(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff7a53; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; +- *((unsigned long*)& 
__m128i_result[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,-12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000036de0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000003be14000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_op2[0]) = 0x00000000ffff7a53; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000001f0000; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_result[1]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000003be14000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000003bfb4000; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,-5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x55); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffe0001fffe0003; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000000000002; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; +- __m256i_out = __lasx_xvabsd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x36); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000bfffffffe0f6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000010001000a; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; +- __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000003bfb4000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000003bfb4000; +- __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000003bfb4000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000003bfb4000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000000; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_b(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000de0000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a; +- __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000006f00000000; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x0000006f; 
+- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000037; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x2f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000de0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000006f00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001f0a; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000006f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, 
__m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000037; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001f0a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000036; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000002; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000020000000200; +- __m128i_out 
= __lsx_vfclass_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000050000007b; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000500000005; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- int_op1 = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000200; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_result[2]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_result[1]) = 0xfff9fff9fff9fff9; +- *((unsigned long*)& __m256i_result[0]) = 0xfff9fff9fff9fff9; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,-7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; +- __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x007b01ec007b3a9e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; +- __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010000fe7c; +- *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000100010000fe01; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000060; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000050000007b; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000500000005; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffbffffff85; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffc0000fdfc; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_result[2]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_result[1]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_result[0]) = 0xfff3fff3fff3fff3; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& 
__m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vflogb_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfff1000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0xfff1000100010001; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000070; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff5; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00010001; +- *((int*)& __m128_op0[2]) = 0x00010001; +- *((int*)& __m128_op0[1]) = 0x00010001; +- *((int*)& __m128_op0[0]) = 0x00010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vftintrp_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_du(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010000fe7c; +- *((unsigned long*)& __m128i_op0[0]) = 0x000100010000fe01; +- *((unsigned long*)& __m128i_result[1]) = 0x000f000f00100000; +- *((unsigned long*)& __m128i_result[0]) = 0x000f000f00100000; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; +- __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00010002; +- *((int*)& __m128_op0[2]) = 0x0000fe7d; +- *((int*)& __m128_op0[1]) = 0x00010002; +- *((int*)& __m128_op0[0]) = 0x0000fe02; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x0000007b; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000001a00; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff3fff3fff3fff3; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00f300ff00f3; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00f300ff00f3; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00f300ff00f3; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00f300ff00f3; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000007b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffbffffff85; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffc0000fdfc; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); +- *((unsigned long*)& __m128i_op0[1]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x0000007b; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000001a00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x35); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff8fff8fff8fff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0003000300030004; +- *((unsigned long*)& __m128i_result[0]) = 0x0003000300030004; +- __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000004; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0204; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000001007c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00010001; +- *((int*)& __m128_op1[2]) = 0x0001007c; +- *((int*)& __m128_op1[1]) = 0x00010001; +- *((int*)& __m128_op1[0]) = 0x00010001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4429146a7b4c88b2; +- *((unsigned long*)& __m128i_op1[0]) = 0xe22b3595efa4aa0c; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000442900007b4c; +- *((unsigned long*)& __m128i_result[0]) = 0x0000e22b0000efa4; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000004; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0204; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000442900007b4c; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000e22b0000efa4; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000442800007b50; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0204; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000442800007b50; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0204; +- *((int*)& __m128_result[3]) = 0x46885000; +- *((int*)& __m128_result[2]) = 0x46f6a000; +- *((int*)& __m128_result[1]) = 0x4f800000; +- *((int*)& __m128_result[0]) = 0x4f7fff02; +- __m128_out = __lsx_vffint_s_wu(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x4688500046f6a000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f8000004f7fff02; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffffff03ffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00013fff; +- __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ffffff03ffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00013fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000088500000f6a0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001fffd00000407; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000442900007b4c; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000e22b0000efa4; +- *((unsigned long*)& __m128i_result[1]) = 0x00ffffff03ffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00013fff; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x4); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000100010001007c; +- __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000100000001007c; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000000010000; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000020000007d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000800000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000001f400000; +- __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128d_result[1]) = 0x40f0001000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x40f0001000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000020000007d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000746400016388; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000586100015567; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0800000200000002; +- *((unsigned long*)& __m128i_result[0]) = 0x000000020000007d; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x40f0001000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x40f0001000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00800000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x1f400000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffa8ff9f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffabff99; +- *((unsigned long*)& __m128i_op1[1]) = 0x000100000002007d; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000000020001; +- *((unsigned long*)& __m128i_result[1]) = 0x00010000ffab001c; +- *((unsigned long*)& __m128i_result[0]) = 0x0001ffffffadff9a; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xffa8ff9f; +- *((int*)& __m128_op1[1]) = 0x0000ffff; +- *((int*)& __m128_op1[0]) = 0xffabff99; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6d6d6d6d6d6d6d6d; +- *((unsigned long*)& __m256i_result[2]) = 0x6d6d6d6d6d6d6d6d; +- *((unsigned long*)& __m256i_result[1]) = 0x6d6d6d6d6d6d6d6d; +- *((unsigned long*)& __m256i_result[0]) = 0x6d6d6d6d6d6d6d6d; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0x6d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00010001; +- *((int*)& __m128_op1[2]) = 0x0001007c; +- *((int*)& __m128_op1[1]) = 0x00010001; +- *((int*)& __m128_op1[0]) = 0x00010001; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00010000ffab001c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001ffffffadff9a; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vslti_hu(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[6]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[5]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[4]) = 0x6d6d6d6d; +- *((int*)& 
__m256_op0[3]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[2]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[1]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[0]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[7]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[6]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[5]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[4]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[3]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[2]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[1]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[0]) = 0x6d6d6d6d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[6]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[5]) = 
0x6d6d6d6d; +- *((int*)& __m256_op0[4]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[3]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[2]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[1]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[0]) = 0x6d6d6d6d; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_result[1]) = 0x40f0001000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100010001; +- __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128d_result[0]) = 0xfffcfffcfffcfffc; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00003fff00003fff; +- __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[6]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[5]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[4]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[3]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[2]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[1]) = 0x6d6d6d6d; +- *((int*)& __m256_op0[0]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[7]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[6]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[5]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[4]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[3]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[2]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[1]) = 0x6d6d6d6d; +- *((int*)& __m256_op1[0]) = 0x6d6d6d6d; +- *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x000000000000ffff; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x40f0001000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x40f0001000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1e0200001e020000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffd; +- *((unsigned long*)& __m128i_result[0]) = 0xfffcfffdfffcfffd; +- __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffd; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffdfffcfffd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff7f7f7fff7fffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff7f7f7fff7fffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3f7f7f7eff800000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3f7f7f7eff800000; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1e0200001e020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffd; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffdfffcfffd; +- *((unsigned long*)& __m128i_result[1]) = 
0xfffffffcfffffffd; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffdfffffffd; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffd; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffdfffcfffd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffcfffdfffcfffd; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x3f7f7f7e; +- *((int*)& __m256_op1[4]) = 0xff800000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x3f7f7f7e; +- *((int*)& __m256_op1[0]) = 0xff800000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x7fffffff; +- *((int*)& __m256_op2[4]) = 0xff7fffff; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x7fffffff; +- *((int*)& __m256_op2[0]) = 0xff7fffff; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x7fffffff; +- *((int*)& __m256_result[4]) = 0x7fc00000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x7fffffff; +- *((int*)& __m256_result[0]) = 0x7fc00000; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8080808000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8080808000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x3f7f7f7eff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x3f7f7f7eff800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007efeff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007efeff00; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffff7fffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffff7fffff; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007efeff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007efeff00; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007aff7c00; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007aff7c00; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffd017d00; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007efeff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007efeff00; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000008e7c00; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000067751500; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000008e7c00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000067751500; +- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fff9fff9; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x00000001fff9fffa; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007ffe7ffe400000; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x2a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x007ffe7ffe400000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007ffd0001400840; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000008e7c00; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000067751500; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000008e7c00; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000067751500; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000007a00f8; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff01640092; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000007a00f8; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff01640092; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000100640000ff92; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000100640000ff92; +- *((unsigned long*)& __m256i_result[3]) = 0x00007c0100007c01; +- *((unsigned long*)& __m256i_result[2]) = 0x00007c0100007c00; +- *((unsigned long*)& __m256i_result[1]) = 0x00007c0100007c01; +- *((unsigned long*)& __m256i_result[0]) = 0x00007c0100007c00; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x30); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_result[3]) = 0x7aff7c0000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfd017d0000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7aff7c0000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfd017d0000000000; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xb3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007ffd0001400840; +- __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007aff7c00; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007aff7c00; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000c7aff7c00; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000c7aff7c00; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffd017d00; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3fffffffff7f0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3fffffffff7f0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000c7aff7c00; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000c7aff7c00; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffd017d00; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000002030000; +- *((unsigned long*)& __m256i_op2[2]) = 0x030303670101fd90; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000002030000; +- *((unsigned long*)& __m256i_op2[0]) = 0x030303670101fd90; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3ffffffffc7bfc99; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3ffffffffc7bfc99; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000027d00f8; +- *((unsigned long*)& __m256i_op1[2]) = 0x040204660265fe22; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000027d00f8; +- *((unsigned long*)& __m256i_op1[0]) = 0x040204660265fe22; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffd000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x3a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x007ffd00; +- *((int*)& __m128_op2[0]) = 0x01400840; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x007ffd00; +- *((int*)& __m128_result[0]) = 0x01400840; +- __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfefa000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x007ffd0001400840; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x007ffd0001400840; +- *((unsigned long*)& __m128i_result[1]) = 0x3fffffff80000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00003ffd000a4000; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fffffff80000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00003ffd000a4000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffcffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fffd000a0000; +- __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& 
__m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xfefa0000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefa000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfefa000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_result[3]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_result[2]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_result[1]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_result[0]) = 0xc3f0c3f0c3f0c3f0; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x3c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xc3f0c3f0c3f0c3f0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xc3f0c3f0c3f0c3f0; +- __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000007a00f8; +- *((unsigned long*)& __m256d_op0[2]) = 0x00ff00ff01640092; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000007a00f8; +- *((unsigned long*)& __m256d_op0[0]) = 0x00ff00ff01640092; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fffffff80000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00003ffd000a4000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffcffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000fffd000a0000; +- *((unsigned long*)& __m128i_result[1]) = 0xf000800080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000a00028004000; +- __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128d_op0[1]) = 0xfffcffff00000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000fffd000a0000; +- *((unsigned long*)& __m128d_op1[1]) = 0xf0fd800080000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000a00028004000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef; +- *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90; +- *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef; +- *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf0fd800080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000a00028004000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363; +- *((unsigned long*)& __m128i_op1[1]) = 0xf0fd800080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000a00028004000; +- *((unsigned 
long*)& __m128i_result[1]) = 0x6b9fe3649c9d6363; +- *((unsigned long*)& __m128i_result[0]) = 0x6363bc9e8b696363; +- __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffe3636363; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000063692363; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef; +- *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90; +- *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef; +- *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90; +- *((unsigned long*)& __m256i_op1[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op1[2]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op1[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op1[0]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[3]) = 0x0007000000fb00ef; +- *((unsigned long*)& __m256i_result[2]) = 0x00ea005600f90090; +- *((unsigned long*)& __m256i_result[1]) = 0x0007000000fb00ef; +- *((unsigned long*)& __m256i_result[0]) = 0x00ea005600f90090; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5c9c9c9ce3636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x63635c9e63692363; +- *((unsigned long*)& __m128i_result[1]) = 0x000000005c9c9c9c; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffe3636363; +- __m128i_out = __lsx_vexth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256d_op0[2]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256d_op0[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256d_op0[0]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00; +- 
*((unsigned long*)& __m256i_result[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf0fd800080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000a00028004000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000f000800000002; +- *((unsigned long*)& __m128i_result[0]) = 0x000f000000000000; +- __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffe4ffffffe4; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffd000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0002ffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6b9fe3649c9d6363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363bc9e8b696363; +- *((unsigned long*)& __m128i_op1[1]) = 0x6b9fe3649c9d6363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363bc9e8b696363; +- *((unsigned long*)& __m128i_result[1]) = 0xb9fe3640e4eb1b18; +- *((unsigned long*)& __m128i_result[0]) = 0x800000005b4b1b18; +- __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18; +- *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18; +- *((unsigned long*)& __m128i_result[1]) = 0xffffb9fe00003640; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe4eb00001b18; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[2]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op0[0]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[2]) = 0x7c007c007c007c00; +- *((unsigned long*)& 
__m256i_result[1]) = 0x7c007c007c007c00; +- *((unsigned long*)& __m256i_result[0]) = 0x7c007c007c007c00; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67b7cf643c9d636a; +- *((unsigned long*)& __m128i_op0[0]) = 0x39d70e366f547977; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x66b34f643c9c626a; +- *((unsigned long*)& __m128i_result[0]) = 0x38d60e366e547876; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18; +- *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffb9fe00003640; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe4eb00001b18; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x80001b155b4b0000; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe273e273e273e273; +- *((unsigned long*)& __m256i_op0[2]) = 0xe273e273e273e273; +- *((unsigned long*)& __m256i_op0[1]) = 0xe273e273e273e273; +- *((unsigned long*)& __m256i_op0[0]) = 0xe273e273e273e273; +- *((unsigned long*)& __m256i_op1[3]) = 0xd207e90001fb16ef; +- *((unsigned long*)& __m256i_op1[2]) = 0xc8eab25698f97e90; +- *((unsigned long*)& __m256i_op1[1]) = 0xd207e90001fb16ef; +- *((unsigned long*)& __m256i_op1[0]) = 0xc8eab25698f97e90; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001c4e8ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001c4e8ffffffff; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xd207e90001fb16ef; +- *((unsigned long*)& __m256i_op0[2]) = 0xc8eab25698f97e90; +- *((unsigned long*)& __m256i_op0[1]) = 0xd207e90001fb16ef; +- *((unsigned long*)& __m256i_op0[0]) = 0xc8eab25698f97e90; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x01fb16ef98f97e90; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x01fb16ef98f97e90; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xb9fe3640e4eb1b18; +- *((unsigned long*)& __m128i_op0[0]) = 0x800000005b4b1b18; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffd000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xdcfe1b20f2f60e0c; +- *((unsigned long*)& __m128i_result[0]) = 0xc00000002e260e0c; +- __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x0001c4e8; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x0001c4e8; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001c4e8ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001c4e8ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_result[2]) = 0x0081c4e8ff7fffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_result[0]) = 0x0081c4e8ff7fffff; +- __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00002df900001700; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffe05ffffe911; +- *((unsigned long*)& __m256i_op0[1]) = 0x00002df900001700; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffe05ffffe911; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffcfffffffc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffcfffffffc; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x66b34f643c9c626a; +- *((unsigned long*)& __m128d_op0[0]) = 0x38d60e366e547876; +- *((unsigned long*)& __m128d_op1[1]) = 0x66b34f643c9c626a; +- *((unsigned long*)& __m128d_op1[0]) = 0x38d60e366e547876; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x80008000b70fb810; +- *((unsigned long*)& __m256i_op0[2]) = 0x3c0f3c0f3911b910; +- *((unsigned long*)& __m256i_op0[1]) = 0x80008000b70fb810; +- *((unsigned long*)& __m256i_op0[0]) = 0x3c0f3c0f3911b910; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff6f20; +- *((unsigned long*)& __m256i_result[2]) = 0x0000781e0000f221; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff6f20; +- *((unsigned long*)& __m256i_result[0]) = 0x0000781e0000f221; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ff010000ff01; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ff010000ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ff010000ff01; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ff010000ff01; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00006c82; +- *((unsigned long*)& __m128d_op0[0]) = 0x00009b140000917b; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffff00006c82; +- *((unsigned long*)& __m128d_result[0]) = 0x00009b140000917b; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffff6f20; +- *((int*)& __m256_op0[5]) = 0x0000781e; +- *((int*)& __m256_op0[4]) = 0x0000f221; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffff6f20; +- *((int*)& __m256_op0[1]) = 0x0000781e; +- *((int*)& __m256_op0[0]) = 0x0000f221; +- 
*((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0xffff6f20; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0xffff6f20; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff6f20; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000781e0000f221; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff6f20; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000781e0000f221; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x80001b155b4b0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00006c82; +- *((unsigned long*)& __m128i_op1[0]) = 0x00009b140000917b; +- *((unsigned long*)& __m128i_result[1]) = 0x80000000fffffffc; +- *((unsigned long*)& __m128i_result[0]) = 0xb150000000000000; +- __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffefffffffe; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x80001b155b4b0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x80001b155b4b0000; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff994cb09c; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc3639d96; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff6f20; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff6f20; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xdbc8000000003fff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xdbc8000000003fff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000ffff; +- *((int*)& __m256_op0[6]) = 0x0000ffff; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0x0000ffff; +- *((int*)& __m256_op0[3]) = 0x0000ffff; +- *((int*)& __m256_op0[2]) = 0x0000ffff; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 
0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x20); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff994cb09c; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc3639d96; +- *((unsigned long*)& __m128i_op1[1]) = 0x20de27761210386d; +- *((unsigned long*)& __m128i_op1[0]) = 0x34632935195a123c; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff994db09c; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffc7639d96; +- __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 
0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xdbc80000; +- *((int*)& __m256_op1[6]) = 0x00003fff; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xdbc80000; +- *((int*)& __m256_op1[2]) = 0x00003fff; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xdbc8000000003fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xdbc8000000003fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdbc8000000003fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xdbc8000000003fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff994db09c; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc7639d96; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xdbc8000000003fff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xdbc8000000003fff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0xff800000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0xff800000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,11); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& 
__m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandi_b(__m128i_op0,0x27); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_h(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000e0000000e; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) 
= 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0xecececececececec; +- *((unsigned long*)& __m128i_result[0]) = 0xecececececececec; +- __m128i_out = __lsx_vldi(1004); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 
0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x86); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff3e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff3e; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x70); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00c100c100c100c1; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00c100c100c100c1; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00c100c100c100c1; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00c100c100c100c1; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x003f003f003f003f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000500000005; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000500000005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000500000005; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000500000005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; +- __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xff3eff3eff3eff3e; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000500000005; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000500000005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000500000005; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000500000005; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[1]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0xbf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_op1[2]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_op1[1]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_op1[0]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffb79fb74; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffa; 
+- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffb79fb74; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m256i_result[3]) = 0x000000010486048c; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000006; +- *((unsigned long*)& __m256i_result[1]) = 0x000000010486048c; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000006; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[3]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m256i_result[1]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0005000500050005; +- *((unsigned long*)& __m128i_result[0]) = 0x0005000500050005; +- __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x000000010486048c; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000100000006; +- *((unsigned long*)& __m256d_op1[1]) = 0x000000010486048c; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000006; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- 
+- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000010486048c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000010486048c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x6f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00050005; +- *((int*)& __m128_op0[2]) = 0x00050005; +- *((int*)& __m128_op0[1]) = 0x00050005; +- *((int*)& __m128_op0[0]) = 0x00050005; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 
0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffa0078fffa0074; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffa0078fffa0074; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffb79fb74; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffb79fb74; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m256i_result[3]) = 0x000100010485048a; +- *((unsigned long*)& __m256i_result[2]) = 0x0005ff870005ff86; +- *((unsigned long*)& __m256i_result[1]) = 0x000100010485048a; +- *((unsigned long*)& __m256i_result[0]) = 0x0005ff870005ff86; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000020006; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffb79fb74; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffb79fb74; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xc192181230000000; +- *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xc192181230000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xd9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00050005; +- *((int*)& __m128_op1[2]) = 0x00050005; +- *((int*)& __m128_op1[1]) = 0x00050005; +- *((int*)& __m128_op1[0]) = 0x00050005; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffecffffffec; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000100010485048a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0005ff870005ff86; +- *((unsigned long*)& __m256i_op0[1]) = 0x000100010485048a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0005ff870005ff86; +- 
*((unsigned long*)& __m256i_op1[3]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_result[3]) = 0xfffeffebfb7afb62; +- *((unsigned long*)& __m256i_result[2]) = 0xfffa0065fffa0066; +- *((unsigned long*)& __m256i_result[1]) = 0xfffeffebfb7afb62; +- *((unsigned long*)& __m256i_result[0]) = 0xfffa0065fffa0066; +- __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffeffeb; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fb7afb62; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffeffeb; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fb7afb62; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffeffebfb7afb62; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffeffebfb7afb62; +- __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc192181230000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; +- *((unsigned 
long*)& __m256i_op0[0]) = 0xc192181230000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffa0078fffa0074; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffa0078fffa0074; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff000000ff0000; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffeffebfb7afb62; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffeffebfb7afb62; +- *((unsigned long*)& __m256i_op1[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc192181230000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc192181230000000; +- *((unsigned long*)& __m256i_result[3]) = 0x4010000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3e6ce7d9cb7afb62; +- *((unsigned long*)& __m256i_result[1]) = 0x4010000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3e6ce7d9cb7afb62; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffa0078fffa0074; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffa0078fffa0074; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffa2078fffa2074; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffa2078fffa2074; +- __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffa0078fffa0074; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffa0078fffa0074; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffa2078fffa2074; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffa2078fffa2074; +- *((unsigned long*)& __m256i_result[3]) = 0x01ff01ff01ff01ff; +- *((unsigned long*)& __m256i_result[2]) = 0x01ff01ff01ff01ff; +- *((unsigned long*)& __m256i_result[1]) = 0x01ff01ff01ff01ff; +- *((unsigned long*)& __m256i_result[0]) = 0x01ff01ff01ff01ff; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffeffebfb7afb62; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffeffebfb7afb62; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[2]) = 0xfffeffebfb7afb62; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[0]) = 0xfffeffebfb7afb62; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4010000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3e6ce7d9cb7afb62; +- *((unsigned long*)& __m256i_op0[1]) = 0x4010000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3e6ce7d9cb7afb62; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2008000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1f3673ece5bd7db1; +- *((unsigned long*)& __m256i_result[1]) = 0x2008000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1f3673ece5bd7db1; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4010000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3e6ce7d9cb7afb62; +- *((unsigned long*)& __m256i_op0[1]) = 0x4010000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3e6ce7d9cb7afb62; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000401000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003e6c0000cb7a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000401000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00003e6c0000cb7a; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000401000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003e6c0000cb7a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000401000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003e6c0000cb7a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x40000000b000032d; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x40000000b000032d; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xeffc000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf064c6098d214127; +- *((unsigned long*)& __m256i_op0[1]) = 0xeffc000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf064c6098d214127; +- *((unsigned long*)& __m256i_result[3]) = 0xeffc001800180018; +- *((unsigned long*)& __m256i_result[2]) = 0xf064c6098d214127; +- *((unsigned long*)& __m256i_result[1]) = 0xeffc001800180018; +- *((unsigned long*)& __m256i_result[0]) = 0xf064c6098d214127; +- __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc192181230000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc192181230000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xff800000; +- *((int*)& __m256_result[6]) = 0xff800000; +- *((int*)& __m256_result[5]) = 0xff800000; +- *((int*)& __m256_result[4]) = 0xff800000; +- *((int*)& __m256_result[3]) = 0xff800000; +- *((int*)& __m256_result[2]) = 0xff800000; +- *((int*)& __m256_result[1]) = 0xff800000; +- *((int*)& __m256_result[0]) = 0xff800000; +- __m256_out = __lasx_xvflogb_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x29); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003030000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256d_op1[2]) = 0xff800000ff800000; +- *((unsigned long*)& __m256d_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m256d_op1[0]) = 0xff800000ff800000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xff800000; +- *((int*)& __m256_result[4]) = 0xff800000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xff800000; +- *((int*)& __m256_result[0]) = 0xff800000; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_op0[2]) = 0xff820002ff820002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_op0[0]) = 0xff820002ff820002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00020002ff820002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00020002ff820002; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff800000ff800000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_result[2]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_result[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_result[0]) = 0xff80000000000000; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00020002ff820002; +- *((unsigned long*)& __m256i_op0[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00020002ff820002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff82; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000003ffda00f3; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000003ffda00f3; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_op0[2]) = 0xff820002ff820002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_op0[0]) = 0xff820002ff820002; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002; +- __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00020002ff820002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00020002ff820002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffint_d_l(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_op2[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op2[2]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op2[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op2[0]) = 0xff80000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_result[0]) = 0x0800080008000800; +- __m128i_out = __lsx_vbitseti_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff80000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffecffffffec; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfe7fffecfe7fffec; +- *((unsigned long*)& __m256i_result[2]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_result[1]) = 0xfe7fffecfe7fffec; +- *((unsigned long*)& __m256i_result[0]) = 0xff80000000000000; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xf4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; 
+- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfe7fffecfe7fffec; +- *((unsigned long*)& __m256i_op1[2]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfe7fffecfe7fffec; +- *((unsigned long*)& __m256i_op1[0]) = 0xff80000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808000800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808000000; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000200020002; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfe7fffecfe7fffec; +- *((unsigned long*)& 
__m256i_op0[2]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe7fffecfe7fffec; +- *((unsigned long*)& __m256i_op0[0]) = 0xff80000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[3]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x8000800080008000; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffdfffdfffdfffd; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_op1[0]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_result[1]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_result[0]) = 0x0800080008000800; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[3]) = 0x4343434343434343; +- *((unsigned long*)& __m256i_result[2]) = 0x4343434343434343; +- *((unsigned long*)& __m256i_result[1]) = 0x4343434343434343; +- *((unsigned long*)& __m256i_result[0]) = 0x4343434343434343; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x38); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0100010001000100; +- __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001a0000001a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001a0000001a; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0800080008000800; +- *((unsigned long*)& __m128d_op1[0]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_op1[0]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0040004000400040; +- __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0800080008000800; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_d(__m128i_op0,0x35); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_h(__m128i_op0,-11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0020002000200020; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_result[1]) = 0x9a9a9a9a9a9a9a9a; +- *((unsigned long*)& __m128i_result[0]) = 0x9aba9aba9aba9aba; +- __m128i_out = __lsx_vxori_b(__m128i_op0,0x9a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; +- *((unsigned long*)& __m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000020000; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0040004000400040; +- *((unsigned long*)& __m128i_result[0]) = 0x0040004000400040; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x55555555; +- *((int*)& __m256_op0[6]) = 0x55555555; +- *((int*)& __m256_op0[5]) = 0x5d5d5d5d; +- *((int*)& __m256_op0[4]) = 0x5d555d55; +- *((int*)& __m256_op0[3]) = 0x55555555; +- *((int*)& __m256_op0[2]) = 0x55555555; +- *((int*)& __m256_op0[1]) = 0x5d5ca2a3; +- *((int*)& __m256_op0[0]) = 0x5d54aaab; +- *((unsigned 
long*)& __m256i_result[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0100000001000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffee; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_result[2]) = 0x01fc03fc01fc03fc; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_result[0]) = 0x01fc03fc01fc03fc; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x3e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256d_op0[2]) = 0x01fc03fc01fc03fc; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256d_op0[0]) = 0x01fc03fc01fc03fc; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_result[2]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_result[0]) = 0x3ff0000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0100000001000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100000001000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; +- *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- 
__m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; +- *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; +- *((unsigned long*)& __m128i_result[1]) = 0xffc0ffc0ffc0ffc0; +- *((unsigned long*)& __m128i_result[0]) = 0xffc0ffc0ffc0ffc0; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffdffd; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffdffd; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffdffd; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffdffd; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffee; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffee; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffee; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffee; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0040004000400040; +- *((unsigned long*)& __m128i_op1[0]) = 0x0040004000400040; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffc; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x01fc03fc01fc03fc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m256i_op0[0]) = 0x01fc03fc01fc03fc; +- *((unsigned long*)& __m256i_result[3]) = 0x000000200000001e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000200000001e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0081000100810001; +- *((unsigned long*)& __m256i_result[2]) = 0x0081000100810001; +- *((unsigned long*)& __m256i_result[1]) = 0x0081000100810001; +- *((unsigned long*)& __m256i_result[0]) = 0x0081000100810001; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000040000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffffdd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffdc; +- __m256i_out = 
__lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff80ff00ff80ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff80ff00ff80ff01; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff80ff00ff80ff01; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff80ff00ff80ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x007f00ff007f00fe; +- *((unsigned long*)& __m256i_op2[2]) = 0xf711ee11f711ee91; +- *((unsigned long*)& __m256i_op2[1]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op2[0]) = 0xf711ee11f711ee11; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff80ff00ff80ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff80ff00ff80ff01; +- __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; +- __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000002affaa; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff002affaa; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000002affaa; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffd50055; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x002affaa00000000; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00820082ff81ff81; +- *((unsigned long*)& __m128d_op0[0]) = 0xff81ff81ff81ff81; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1f); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000820000ff81; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff810000ff81; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000820000ff81; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff810000ff81; +- __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffeffffffdd; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x002affaa00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffffdd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffdc; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256d_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x000000000000ffff; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_op1[3]) = 
0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ee; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ee; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f00ff007f00ff; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffffdd; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x001f001f001f001f; +- *((unsigned long*)& __m128i_result[0]) = 0x001f001f001f001f; +- __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000808; +- __m256i_out = __lasx_xvclo_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000808; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1010100fefefeff0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0f8f0e8df676f778; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ef32; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ee; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ee; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffce; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000fc7c; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffce; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000fc7c; +- __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x28); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xff81ff82ff810081; +- *((unsigned long*)& __m128i_op2[0]) = 0xff82ff810081ff81; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_op0[2]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_op0[1]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_op0[0]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; +- __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op1[3]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_op1[2]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_op1[1]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_op1[0]) = 0xe7e7e7e7e7e7e7e7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0xe6e8e6e8e6e8d719; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0xe6e8e6e8e6e8d719; +- __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 
0x0000ffce; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000fc7c; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x0000ffce; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000fc7c; +- *((int*)& __m256_op1[7]) = 0xe7e7e7e7; +- *((int*)& __m256_op1[6]) = 0xe7e7e7e7; +- *((int*)& __m256_op1[5]) = 0xe7e7e7e7; +- *((int*)& __m256_op1[4]) = 0xe7e7e7e7; +- *((int*)& __m256_op1[3]) = 0xe7e7e7e7; +- *((int*)& __m256_op1[2]) = 0xe7e7e7e7; +- *((int*)& __m256_op1[1]) = 0xe7e7e7e7; +- *((int*)& __m256_op1[0]) = 0xe7e7e7e7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ffce20; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ffce20; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ee1100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000004560408; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ee1100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000004560408; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff1100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000004560420; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff1100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000004560420; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ffce20; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ffce20; +- *((unsigned long*)& __m256i_result[3]) = 0x1514151415141514; +- *((unsigned long*)& __m256i_result[2]) = 0x151415141514e335; +- *((unsigned long*)& __m256i_result[1]) = 0x1514151415141514; +- *((unsigned long*)& __m256i_result[0]) = 0x151415141514e335; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000080000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080000000000; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op1[3]) = 0x1514151415141514; +- *((unsigned long*)& __m256i_op1[2]) = 0x151415141514e335; +- *((unsigned long*)& __m256i_op1[1]) = 0x1514151415141514; +- *((unsigned long*)& __m256i_op1[0]) = 0x151415141514e335; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000e9ece9ec; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000e9ece9ec; +- 
*((unsigned long*)& __m256i_result[1]) = 0x00000000e9ece9ec; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000e9ece9ec; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256d_op0[2]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256d_op0[1]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256d_op0[0]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256d_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256d_op1[2]) = 0x00ff00ff00ef0120; +- *((unsigned long*)& __m256d_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256d_op1[0]) = 0x00ff00ff00ef0120; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffff007f00000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffff007f00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xecec006c00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xecec006c00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff007f00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff007f00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ef0120; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ef0120; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000e9ece9ec; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000e9ece9ec; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000e9ece9ec; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000e9ece9ec; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff0120; +- *((unsigned long*)& __m256i_result[2]) = 0x0000e9ec0000e9ec; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff0120; +- *((unsigned long*)& __m256i_result[0]) = 0x0000e9ec0000e9ec; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x38); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff007f00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff007f00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff0000007f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00550f0000550f00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00ff00ff00ef32; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000015c015c0; +- *((unsigned long*)& __m256i_result[2]) = 0xc0c0c0cdc0c0c0cd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xc0c0c0cdc0c0c0cd; +- __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001f0000001f; +- __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff1100; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000004560420; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff1100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000004560420; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff1100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000004560420; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff1100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000004560420; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; +- __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xd04752cdd5543b56; +- *((unsigned long*)& __m256i_op0[2]) = 0x6906e68064f3d78b; +- *((unsigned long*)& __m256i_op0[1]) = 0xd04752cdd5543b56; +- *((unsigned long*)& __m256i_op0[0]) = 0x6906e68064f3d78b; +- *((unsigned long*)& __m256i_result[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000300000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000300000002; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff007f00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff007f00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000007f00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000007f00000000; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000001f; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xd04752cdd5543b56; +- *((unsigned long*)& __m256i_op0[2]) = 0x6906e68064f3d78b; +- *((unsigned long*)& __m256i_op0[1]) = 0xd04752cdd5543b56; +- *((unsigned long*)& __m256i_op0[0]) = 0x6906e68064f3d78b; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff1100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000004560420; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff1100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000004560420; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ffff00ff00; +- *((unsigned long*)& __m256i_result[2]) = 0x00000fff00004542; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ffff00ff00; +- *((unsigned long*)& __m256i_result[0]) = 0x00000fff00004542; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ffff00ff00; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000fff00004542; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ffff00ff00; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000fff00004542; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ffff00ff00; +- *((unsigned long*)& __m256i_result[2]) = 0x00000fff00004542; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ffff00ff00; +- *((unsigned long*)& __m256i_result[0]) = 0x00000fff00004542; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff0000007f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff0000007f; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000300000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000300000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffffffe; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0202020202020203; +- *((unsigned long*)& __m128i_op0[0]) = 0x0202020202020203; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001f0000001f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001f0000ffff; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x52525252525252cb; +- *((unsigned long*)& __m128i_op1[0]) = 0x52525252525252cb; +- *((unsigned long*)& __m128i_result[1]) = 0xaeaeaeaeaeaeae35; +- *((unsigned long*)& __m128i_result[0]) = 0xaeaeaeaeaeaeae35; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xaeaeaeaeaeaeae35; +- *((unsigned long*)& __m128i_op0[0]) = 0xaeaeaeaeaeaeae35; +- *((unsigned long*)& __m128i_op1[1]) = 0xaeaeaeaeaeaeae35; +- *((unsigned long*)& __m128i_op1[0]) = 0xaeaeaeaeaeaeae35; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; +- __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x3e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000300000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000300000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000004411; +- __m256i_out = 
__lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0008000800080008; +- *((unsigned long*)& __m256i_op0[2]) = 0x000c005e000c0029; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0004005600040020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000300000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000300000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000060008; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000c005b; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffe0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000040053; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00ff00ffff00ff00; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000fff00004542; +- *((unsigned long*)& __m256d_op0[1]) = 0x00ff00ffff00ff00; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000fff00004542; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000001f0000001f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001f0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000005000000020; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000005000000020; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000005000000020; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000005000000020; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000005000000020; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000005000000020; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000005000000020; +- *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000005000000020; +- __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0202020202020203; +- *((unsigned long*)& __m128i_op1[0]) = 0x0202020202020203; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000002020202; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000002020202; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000001f0000001f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001f0000ffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000060008; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000000c005b; +- *((unsigned long*)& __m256i_op2[1]) = 0xfffffffffffe0000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000040053; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff0007fff7; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff005affa4; +- 
*((unsigned long*)& __m256i_result[1]) = 0xffffffe100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000053ffac; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000001f0000001f; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000001f0000ffff; +- *((unsigned long*)& __m256d_result[3]) = 0x60000007fffe0001; +- *((unsigned long*)& __m256d_result[2]) = 0x60000007fffe0001; +- *((unsigned long*)& __m256d_result[1]) = 0x6056fd4e7926d5c0; +- *((unsigned long*)& __m256d_result[0]) = 0x6056fd4e1a4616c4; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00040000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 
0x00ff00ff00040000; +- __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00ff00ff; +- *((int*)& __m256_op0[6]) = 0x00ff00ff; +- *((int*)& __m256_op0[5]) = 0x00ff00ff; +- *((int*)& __m256_op0[4]) = 0x000c0000; +- *((int*)& __m256_op0[3]) = 0x00ff00ff; +- *((int*)& __m256_op0[2]) = 0x00ff00ff; +- *((int*)& __m256_op0[1]) = 0x00ff00ff; +- *((int*)& __m256_op0[0]) = 0x00040000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00ff00ff; +- *((int*)& __m256_result[6]) = 0x00ff00ff; +- *((int*)& __m256_result[5]) = 0x00ff00ff; +- *((int*)& __m256_result[4]) = 0x000c0000; +- *((int*)& __m256_result[3]) = 0x00ff00ff; +- *((int*)& __m256_result[2]) = 0x00ff00ff; +- *((int*)& __m256_result[1]) = 0x00ff00ff; +- *((int*)& __m256_result[0]) = 0x00040000; +- __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000005000000020; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000005000000020; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; +- *((int*)& __m256_result[7]) = 0xdf000000; +- *((int*)& __m256_result[6]) = 0x52a00000; +- *((int*)& __m256_result[5]) = 0x5b7f00ff; +- *((int*)& __m256_result[4]) = 0x5b7f00ff; +- *((int*)& __m256_result[3]) = 0xdf000000; +- *((int*)& __m256_result[2]) = 0x52a00000; +- *((int*)& __m256_result[1]) = 0x5b7f00ff; +- *((int*)& __m256_result[0]) = 0x5b7f00ff; +- __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020206431; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000005000000020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000005000000020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002800000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002800000010; +- __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00040000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00040000; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00000083; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0xff01ff010000ff7d; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000fffc; +- __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x00010001000c4411; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100044411; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xff01ff01; +- *((int*)& __m128_op1[2]) = 0x0000ff7d; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x0000fffc; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000002800000010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000002800000010; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff0127000c0010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff012700040010; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff01ff010000ff7d; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x00010001000c4411; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100044411; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000002800000010; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000002800000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0002000200020018; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0002000200020008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000002; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000002; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002000200020018; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0002000200020008; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00c0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0040000000000000; +- 
__m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x35); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xdf00000052a00000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5b7f00ff5b7f00ff; +- *((unsigned long*)& __m256i_op1[1]) = 0xdf00000052a00000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5b7f00ff5b7f00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00040000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00040000; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdf00000052a00000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5b7f00ff5b7f00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0xdf00000052a00000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5b7f00ff5b7f00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffefffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffefffff; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdf00000052a00000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5b7f00ff5b7f00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0xdf00000052a00000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5b7f00ff5b7f00ff; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0xdf01010153a10101; +- *((unsigned long*)& __m256i_result[2]) = 0x5b7f01ff5b7f10ff; +- *((unsigned long*)& __m256i_result[1]) = 0xdf01010153a10101; +- *((unsigned long*)& __m256i_result[0]) = 0x5b7f01ff5b7f10ff; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffefffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffefffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0007000700070007; +- *((unsigned long*)& __m128i_result[0]) = 0x0007000700070007; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffefffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffefffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_b(__m128i_op0,5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdf00000052a00000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5b7f00ff5b7f00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0xdf00000052a00000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5b7f00ff5b7f00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00c0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0040000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000c0000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000040000000; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00040000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op2[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdf01010153a10101; +- *((unsigned long*)& __m256i_op0[2]) = 0x5b7f01ff5b7f10ff; +- *((unsigned long*)& __m256i_op0[1]) = 0xdf01010153a10101; +- *((unsigned long*)& __m256i_op0[0]) = 0x5b7f01ff5b7f10ff; +- *((unsigned long*)& __m256i_result[3]) = 0xcf01010143a10101; +- *((unsigned long*)& __m256i_result[2]) = 0x4b6f01ef4b6f00ef; +- *((unsigned long*)& __m256i_result[1]) = 0xcf01010143a10101; +- *((unsigned long*)& __m256i_result[0]) = 0x4b6f01ef4b6f00ef; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004411; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000004411; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_result[2]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_result[1]) = 0x001f001f001f001f; +- *((unsigned long*)& 
__m256i_result[0]) = 0x001f001f001f001f; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op0[2]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op0[1]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op0[0]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000c0000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000040000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0020001f001f001e; +- *((unsigned long*)& __m256i_result[2]) = 0x001f001fc01f001f; +- *((unsigned long*)& __m256i_result[1]) = 0x0020001f001f001e; +- *((unsigned long*)& __m256i_result[0]) = 0x001f001f401f001f; +- __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m128d_result[0]) = 0xbff0000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256d_result[3]) = 0x43c0101010101010; +- *((unsigned long*)& __m256d_result[2]) = 0x43c0101010101032; +- *((unsigned long*)& __m256d_result[1]) = 0x43c0101010101010; +- *((unsigned long*)& __m256d_result[0]) = 0x43c0101010101032; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x1010101010101010; +- *((unsigned long*)& __m128i_result[0]) = 0xefefefefefefefef; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000c0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000040000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000c0000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000040000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0003030300000300; +- *((unsigned long*)& __m256i_result[2]) = 0x0003030300000300; +- *((unsigned long*)& __m256i_result[1]) = 0x0003030300000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0003030300000100; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x01010101; +- *((int*)& __m128_op0[0]) = 0x01010101; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m128i_op0[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[1]) = 0x0039ffffffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffbeffffffffffff; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[1]) = 0x41dfffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000083b00000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x33); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff000c0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00040000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000100000020; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000083b00000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, 
__m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x41dfffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m128i_op1[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x7e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_h(__m256i_op0,11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_result[0]) = 0x1111111111111111; +- __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_result[0]) = 0x1111111111111111; +- __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdf01010153a10101; +- *((unsigned long*)& __m256i_op0[2]) = 0x5b7f01ff5b7f10ff; +- *((unsigned long*)& __m256i_op0[1]) = 0xdf01010153a10101; +- *((unsigned long*)& __m256i_op0[0]) = 0x5b7f01ff5b7f10ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op1[2]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op1[1]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op1[0]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& 
__m256d_result[0]) = 0x7ff8000000000000; +- __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0003030300000300; +- *((unsigned long*)& __m256d_op0[2]) = 0x0003030300000300; +- *((unsigned long*)& __m256d_op0[1]) = 0x0003030300000100; +- *((unsigned long*)& __m256d_op0[0]) = 0x0003030300000100; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x35); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0003030300000300; +- *((unsigned long*)& __m256d_op0[2]) = 0x0003030300000300; +- *((unsigned long*)& __m256d_op0[1]) = 0x0003030300000100; +- *((unsigned long*)& __m256d_op0[0]) = 0x0003030300000100; +- *((unsigned long*)& __m256d_result[3]) = 0x1febc46085090ea0; +- *((unsigned long*)& __m256d_result[2]) = 0x1febc46085090ea0; +- *((unsigned long*)& __m256d_result[1]) = 0x1febc46085090567; +- *((unsigned long*)& __m256d_result[0]) = 0x1febc46085090567; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe6; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffe6; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op0[2]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op0[1]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op0[0]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0003030300000300; +- *((unsigned long*)& __m256i_op1[2]) = 0x0003030300000300; +- *((unsigned long*)& __m256i_op1[1]) = 0x0003030300000100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0003030300000100; +- *((unsigned long*)& __m256i_result[3]) = 0x00f800f800f800f8; +- *((unsigned long*)& __m256i_result[2]) = 0x0018181800181818; +- *((unsigned long*)& __m256i_result[1]) = 0x00f800f800f800f8; +- *((unsigned long*)& __m256i_result[0]) = 0x0018181800181818; +- __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0008; +- __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0003030300000300; +- *((unsigned long*)& __m256i_op0[2]) = 0x0003030300000300; +- *((unsigned long*)& __m256i_op0[1]) = 0x0003030300000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0003030300000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0043030300400300; +- *((unsigned long*)& __m256i_result[2]) = 0x0043030300400300; +- *((unsigned long*)& __m256i_result[1]) = 0x0043030300400100; +- *((unsigned long*)& __m256i_result[0]) = 0x0043030300400100; +- __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffff0008; +- *((int*)& __m128_op1[3]) = 0xffc2ffe0; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x0000ffc1; +- *((int*)& __m128_op1[0]) = 0x00010001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0x04080c1014182d35; +- *((unsigned long*)& __m256i_result[2]) = 0x716d696573765161; +- *((unsigned long*)& __m256i_result[1]) = 0x04080c1014182d35; +- *((unsigned long*)& __m256i_result[0]) = 0x716d696573765161; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op1[3]) = 0x00f800f800f800f8; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0018181800181818; +- *((unsigned long*)& __m256i_op1[1]) = 0x00f800f800f800f8; +- *((unsigned long*)& __m256i_op1[0]) = 0x0018181800181818; +- *((unsigned long*)& __m256i_result[3]) = 0x001f1f3e3e1f1f00; +- *((unsigned long*)& __m256i_result[2]) = 0x0003060909060300; +- *((unsigned long*)& __m256i_result[1]) = 0x001f1f3e3e1f1f00; +- *((unsigned long*)& __m256i_result[0]) = 0x0003060909060300; +- __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111111111111111; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[1]) = 0x1111113111111131; +- *((unsigned long*)& __m128i_result[0]) = 0x1111113111111131; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op1[3]) = 0x0043030300400300; +- *((unsigned long*)& __m256i_op1[2]) = 0x0043030300400300; +- *((unsigned long*)& __m256i_op1[1]) = 0x0043030300400100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0043030300400100; +- *((unsigned long*)& __m256i_result[3]) = 0xffdd001dffe00020; +- *((unsigned long*)& __m256i_result[2]) = 0xffdd001dffe00031; +- *((unsigned long*)& __m256i_result[1]) = 0xffdd001dffe00020; +- *((unsigned long*)& __m256i_result[0]) = 0xffdd001dffe00031; +- __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x001f1f3e; +- *((int*)& __m256_op1[6]) = 0x3e1f1f00; +- *((int*)& __m256_op1[5]) = 0x00030609; +- *((int*)& __m256_op1[4]) = 0x09060300; +- *((int*)& __m256_op1[3]) = 0x001f1f3e; +- *((int*)& __m256_op1[2]) = 0x3e1f1f00; +- *((int*)& __m256_op1[1]) = 0x00030609; +- *((int*)& __m256_op1[0]) = 0x09060300; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x41dfffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111131; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111131; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffff0008; +- *((unsigned long*)& __m128i_result[1]) = 0x1111113111111141; +- *((unsigned long*)& __m128i_result[0]) = 0x1111113111111121; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_w(__m128i_op0,3); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7f8000007f7fffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7f8000007f7fffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7f8000007f7fffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7f8000007f7fffff; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,-5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x41dfbe1f41e0ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x41dfbe1f41e0ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xec); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3f77aab500000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; +- *((unsigned long*)& __m128i_op2[1]) = 0x3f77aab500000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000ffc100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0fbc1df53c1ae3f9; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ff820f81; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1111113111111141; +- *((unsigned long*)& __m128i_op1[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000111111312; +- *((unsigned long*)& __m128i_result[0]) = 0x2222272111111410; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[1]) = 0x0fbc1df53c1ae3f9; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff820f81; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xf144e32bc4e61d27; +- *((unsigned long*)& __m128i_result[0]) = 0x00000020017ef19f; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffdd001dffe00020; +- *((unsigned long*)& __m256i_op0[2]) = 0xffdd001dffe00031; +- *((unsigned long*)& __m256i_op0[1]) = 0xffdd001dffe00020; +- *((unsigned long*)& __m256i_op0[0]) = 0xffdd001dffe00031; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x3ff73ff83ff73ff8; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x3ff73ff83ff73ff8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x3ff73ff83ff73ff8; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x3ff73ff83ff73ff8; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256d_op2[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256d_op2[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256d_op2[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256d_result[3]) = 0xa020202020202020; +- *((unsigned long*)& __m256d_result[2]) = 0xa020202020206431; +- *((unsigned long*)& __m256d_result[1]) = 0xa020202020202020; +- *((unsigned long*)& __m256d_result[0]) = 0xa020202020206431; +- __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0x33); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_b(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe700000007; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; +- __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x01fffffffe000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x01fffffffe000000; +- __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x41dfffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0100000008080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc2ffe700000007; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffc100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x41dfffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xbde2ffe800000007; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffc100010001; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x41dfffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x403be000; +- *((int*)& __m128_result[2]) = 0xffffe000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x00000020; +- *((int*)& __m128_op0[2]) = 0x00000020; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& 
__m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x0000ffc1; +- *((int*)& __m128_op1[0]) = 0x00010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000021ffffffdf; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000e60; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001ff85ffdc0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000332ae5d97330; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1ff85ffe2ae5d973; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvexth_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x01ffffff; +- *((int*)& __m256_op1[4]) = 0xfe000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x01ffffff; +- *((int*)& __m256_op1[0]) = 0xfe000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000021ffffffdf; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000e60; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1ff85ffe2ae5d973; +- *((unsigned long*)& __m128i_result[1]) = 0x00010020fffeffde; +- *((unsigned long*)& __m128i_result[0]) = 0x0100400100200e68; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00010020fffeffde; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100400100200e68; +- *((unsigned long*)& __m128i_op1[1]) = 0x00010020fffeffde; +- *((unsigned long*)& __m128i_op1[0]) = 0x0100400100200e68; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x1ff85ffe2ae5d973; +- *((unsigned long*)& __m128i_result[1]) = 0x00010020fffeffde; +- *((unsigned long*)& __m128i_result[0]) = 0x011f57c100201a46; +- __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1ff85ffe2ae5d973; +- *((unsigned long*)& __m128i_op1[1]) = 0x403be000ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000ffc2f; +- *((unsigned long*)& __m128i_result[0]) = 0x00201df000000000; +- __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x29); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffc2ffe7; +- *((int*)& __m128_op0[2]) = 0x00000007; +- *((int*)& __m128_op0[1]) = 0x0000ffc1; +- *((int*)& __m128_op0[0]) = 0x00010001; +- *((int*)& __m128_op1[3]) = 0xffc2ffe7; +- *((int*)& __m128_op1[2]) = 0x00000007; +- *((int*)& __m128_op1[1]) = 0x0000ffc1; +- *((int*)& __m128_op1[0]) = 0x00010001; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x000ffc2f; +- *((int*)& __m128_op2[1]) = 0x00201df0; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xffc2ffe7; +- *((int*)& __m128_result[2]) = 0x800ffc2f; +- *((int*)& __m128_result[1]) = 0x80201df0; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; +- *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3838383838300010; +- *((unsigned long*)& __m128i_result[0]) = 0x3818200838383838; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0xc7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x2222272011111410; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x2222272011111410; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_result[1]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0xa020202020206431; +- __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x01fffffffe000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x01fffffffe000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x01fffffffe000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x01fffffffe000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfe00000000000000; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000017f7f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000017f7f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f00000000000000; +- __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_op1[1]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202031; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202031; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; +- *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffc2ffe700000007; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000ffc100010001; +- *((unsigned long*)& __m128i_op2[1]) = 0x00010020fffeffde; +- *((unsigned long*)& __m128i_op2[0]) = 0x011f57c100201a46; +- *((unsigned long*)& __m128i_result[1]) = 0x001ffce00016fb41; +- *((unsigned long*)& __m128i_result[0]) = 0x57cb857100001a46; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000017f7f7f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000017f7f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000017f00007f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x00007f0000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x1111113111111141; +- *((unsigned long*)& __m128d_op1[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffc2ffe7; +- *((int*)& __m128_op0[2]) = 0x00000007; +- *((int*)& __m128_op0[1]) = 0x0000ffc1; +- *((int*)& __m128_op0[0]) = 0x00010001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0xfffff1a0; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfbffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfbffffffffffffff; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x3a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000017f00007f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00007f0000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fd; +- *((unsigned long*)& __m256i_result[0]) = 0xffff810000000000; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202031; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202031; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_op1[1]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_result[1]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0xa020202020206431; +- __m256i_out = 
__lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xf0800320fff1fa20; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0032000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfbffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x7bffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xfbffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x7bffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_du(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x1111113111111141; +- *((unsigned long*)& __m128d_op0[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0032000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001ffce00016fb41; +- *((unsigned long*)& __m128i_op0[0]) = 0x57cb857100001a46; +- *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000150000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffeffff001effff; +- __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000150000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffeffff001effff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffff1a0; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000f00f; +- __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfbffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x7bffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000020; +- *((int*)& __m128_op0[0]) = 0x00000020; +- *((unsigned long*)& __m128d_result[1]) = 0x36f0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x36f0000000000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0xa020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0xa020202020206431; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xd010101010101010; +- *((unsigned long*)& __m256i_result[2]) = 0xd010101010103218; +- *((unsigned long*)& __m256i_result[1]) = 0xd010101010101010; +- *((unsigned long*)& __m256i_result[0]) = 0xd010101010103218; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f00f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000f00f; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfbffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 
0x7bffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xf7ffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xf7feffffffffffff; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xd010101010101010; +- *((unsigned long*)& __m256i_op0[2]) = 0xd010101010103218; +- *((unsigned long*)& __m256i_op0[1]) = 0xd010101010101010; +- *((unsigned long*)& __m256i_op0[0]) = 0xd010101010103218; +- *((unsigned long*)& __m256i_op1[3]) = 0xd010101010101010; +- *((unsigned long*)& __m256i_op1[2]) = 0xd010101010103218; +- *((unsigned long*)& __m256i_op1[1]) = 0xd010101010101010; +- *((unsigned long*)& __m256i_op1[0]) = 0xd010101010103218; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_result[2]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_result[1]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_result[0]) = 0x0010002000100020; +- __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xfe00000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xfe00000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; +- __m256d_out = __lasx_xvfmul_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff77777807777775; +- *((unsigned long*)& __m128i_op0[0]) = 0xe6eeef00eeeeeebf; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f00f; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff007700070077; +- *((unsigned long*)& __m128i_result[0]) = 0x00e600ef00ee01de; +- __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000f00f; +- *((unsigned long*)& __m128i_result[1]) = 0x111110ff11111141; +- *((unsigned long*)& __m128i_result[0]) = 0x1111113111111100; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128i_op1[1]) = 0xfbffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7bffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x060808ff08080820; +- *((unsigned long*)& __m128i_result[0]) = 0x4608081808080810; +- __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_h(__m256i_op0,10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000f00f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000007fff; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010002000100020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffe; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x3e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x111110ff11111141; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111100; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128i_result[1]) = 0x1111311111114111; +- *((unsigned long*)& __m128i_result[0]) = 0x1111311111112111; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffe0000000000; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x111110ff11111141; +- *((unsigned long*)& __m128i_op1[0]) = 0x11111131111116a6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000001ffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfe00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001ff8000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001ff8000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_h(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111311111114111; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111311111112111; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff; +- *((unsigned long*)& __m128i_result[1]) = 0x1111311111114111; +- *((unsigned long*)& __m128i_result[0]) = 0x1111311111110000; +- __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00080008000801ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0008000800080008; +- *((unsigned long*)& __m256i_result[1]) = 0x00080008000801ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0008000800080008; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111113111111141; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111113111111121; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff8000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff8000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000800000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000800000; +- __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x28); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000007fff; +- *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_result[0]) = 0x2020202020207fff; +- __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000800000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff; +- *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_result[0]) = 0x2020202020207f7f; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00080008000801ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0008000800080008; +- *((unsigned long*)& __m256i_op0[1]) = 0x00080008000801ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0008000800080008; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_d(__m256i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00ff0077; +- *((int*)& __m128_op0[2]) = 0x00070077; +- *((int*)& __m128_op0[1]) = 0x00e600ef; +- *((int*)& __m128_op0[0]) = 0x00ee01de; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00007fff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; 
+- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020643100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020643100000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0032000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000009c400000000; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0032000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op2[0]) = 0x2020202020207f7f; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff0000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x2020202020206431; +- *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1111311111114111; +- *((unsigned long*)& __m128i_op0[0]) = 0x1111311111110000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffe0000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x20202020; +- *((int*)& __m128_op0[2]) = 0x20202020; +- *((int*)& __m128_op0[1]) = 0x20202020; +- *((int*)& __m128_op0[0]) = 0x20207fff; +- *((int*)& __m128_op1[3]) = 0x32d3f35e; +- *((int*)& 
__m128_op1[2]) = 0xcd509d13; +- *((int*)& __m128_op1[1]) = 0x3e081b3c; +- *((int*)& __m128_op1[0]) = 0x93f6b356; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; +- unsigned_int_result = 0x0000000020202020; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x1); +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op1[0]) = 0x2020202020207fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x01010101010101ff; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x2020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207fff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x20202020; +- *((int*)& __m128_op0[2]) = 0x20202020; +- *((int*)& __m128_op0[1]) = 0x20202020; +- *((int*)& __m128_op0[0]) = 0x20207fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffff02; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; +- *((unsigned long*)& __m128i_result[1]) = 0x5d5d5d5d5d5d5d5d; +- *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d0000; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0xa2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xc000c000c000ff81; +- *((unsigned long*)& __m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d; +- *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d0000; +- *((unsigned long*)& __m128i_result[1]) = 0xa2a2a2a3a2a2a2a3; +- *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 
0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0ba00ba00ba00ba0; +- *((unsigned long*)& __m128i_op0[0]) = 0x0ba00ba00ba011eb; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000a0000000a; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000a0000000d; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,-2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; +- *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0020002000200020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; +- *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d; +- *((unsigned long*)& __m128i_op1[0]) = 0x5d5d5d5d5d5d0000; +- *((unsigned long*)& __m128i_result[1]) = 0xa2a2a2a3a2a2a2a3; +- *((unsigned long*)& __m128i_result[0]) = 0xc605c000aedd0000; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf1819b7c0732a6b6; +- *((unsigned long*)& __m128i_op0[0]) = 0xffb9917a6e7fffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_d(__m128i_op0,12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0020002000200020; +- __m256i_out = 
__lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0ba00ba00ba00ba0; +- *((unsigned long*)& __m128i_op0[0]) = 0x0ba00ba00ba011eb; +- *((unsigned long*)& __m128i_op1[1]) = 0xf1819b7c0732a6b6; +- *((unsigned long*)& __m128i_op1[0]) = 0xffb9917a6e7fffff; +- *((unsigned long*)& __m128i_result[1]) = 0x05d0ba0002e8802e; +- *((unsigned long*)& __m128i_result[0]) = 0xd005e802174023d6; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000003f; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f417f417f027e03; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000003f; +- *((unsigned long*)& __m128i_result[0]) = 0x2020202020207e03; +- __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x05d0ba0002e8802e; +- *((unsigned long*)& __m128i_op0[0]) = 0xd005e802174023d6; +- *((unsigned 
long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xc000c000c000ff81; +- *((unsigned long*)& __m128i_op2[1]) = 0x0ba00ba00ba00ba0; +- *((unsigned long*)& __m128i_op2[0]) = 0x0ba00ba00ba011eb; +- *((unsigned long*)& __m128i_result[1]) = 0x05d0ae6002e8748e; +- *((unsigned long*)& __m128i_result[0]) = 0xcd1de80217374041; +- __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; +- *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000005151515; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000006302e00; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000005151515; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000006302e00; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000003f; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f417f417f027e03; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001fd0; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000001; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000001; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7fc00000; +- *((int*)& __m256_result[4]) = 0x7fc00000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7fc00000; +- *((int*)& __m256_result[0]) = 0x7fc00000; +- __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; +- *((unsigned long*)& __m128i_op0[0]) = 0xc605c000aedd0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m256i_result[3]) = 0x0080010000800100; +- *((unsigned long*)& __m256i_result[2]) = 0x00c0000000c00000; +- *((unsigned long*)& __m256i_result[1]) = 0x0080010000800100; +- *((unsigned long*)& __m256i_result[0]) = 0x00c0000000c00000; +- __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x7f800000; +- *((int*)& __m256_op2[6]) = 0x7f800000; +- *((int*)& __m256_op2[5]) = 0x7fc00000; +- *((int*)& __m256_op2[4]) = 0x7fc00000; +- *((int*)& __m256_op2[3]) = 0x7f800000; +- *((int*)& __m256_op2[2]) = 0x7f800000; +- *((int*)& __m256_op2[1]) = 0x7fc00000; +- *((int*)& __m256_op2[0]) = 0x7fc00000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7fc00000; +- *((int*)& __m256_result[4]) = 0x7fc00000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7fc00000; +- *((int*)& __m256_result[0]) = 0x7fc00000; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9795698585057dec; +- *((unsigned long*)& __m128i_op0[0]) = 0x87f82867431a1d08; +- *((unsigned long*)& __m128i_result[1]) = 0x9780697084f07dd7; +- *((unsigned long*)& __m128i_result[0]) = 0x87e3285243051cf3; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001fd0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001fd0; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003f; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f417f417f027e03; +- *((unsigned long*)& __m128i_op1[1]) = 0x9780697084f07dd7; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x87e3285243051cf3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9780697084f07dd7; +- *((unsigned long*)& __m128i_op0[0]) = 0x87e3285243051cf3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000cdc1; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9795698585057dec; +- *((unsigned long*)& __m128i_op0[0]) = 0x87f82867431a1d08; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x1149a96eb1a08000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000cdc1; +- *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; +- *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; +- *((unsigned long*)& __m128i_op2[1]) = 0x05d0ae6002e8748e; +- *((unsigned 
long*)& __m128i_op2[0]) = 0xcd1de80217374041; +- *((unsigned long*)& __m128i_result[1]) = 0xf490ee600180ce20; +- *((unsigned long*)& __m128i_result[0]) = 0x063bff74fb46e356; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x1149a96eb1a08000; +- *((unsigned long*)& __m128i_result[1]) = 0xb1a08000b1a08000; +- *((unsigned long*)& __m128i_result[0]) = 0xb1a08000b1a08000; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001fd0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001fd0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001ffff0001ffff; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x05d0ae6002e8748e; +- *((unsigned long*)& __m128i_op0[0]) = 0xcd1de80217374041; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000065a0; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x05d0ae6002e8748e; +- *((unsigned long*)& __m128i_op0[0]) = 0xcd1de80217374041; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000003f; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f417f417f027e03; +- *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x5237c1bac9eadf55; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x60); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000001; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000001; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; +- *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; +- *((unsigned long*)& __m128i_result[1]) = 0x5237c1baffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x7d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000065a0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x9941d1d5f4ba9d08; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x9941d155f43a9d08; +- __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0xdfffffffdfffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0xdfffffffdfffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x9941d155f43a9d08; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00008bf700017052; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000f841000091aa; +- *((unsigned long*)& __m128i_op1[1]) = 0xe6d4572c8a5835bc; +- *((unsigned long*)& __m128i_op1[0]) = 0xe5017c2ac9ca9fd0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000f8410000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff0c170; +- *((unsigned long*)& __m128d_op0[0]) = 0x5237c1bac9eadf55; +- *((unsigned long*)& __m128d_op1[1]) = 0xe6d4572c8a5835bc; +- *((unsigned long*)& __m128d_op1[0]) = 0xe5017c2ac9ca9fd0; +- *((unsigned long*)& __m128d_result[1]) = 0xe93d0bd19ff07013; +- *((unsigned long*)& __m128d_result[0]) = 0x65017c2ac9ca9fd0; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xe93d0bd19ff0c170; +- *((unsigned long*)& __m128i_op1[0]) = 0x5237c1bac9eadf55; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000007fc00000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007fc00000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000007fc00000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fc00000; +- *((unsigned long*)& __m256i_op2[3]) = 0xdfffffffdfffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xdfffffffdfffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xbff0000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff07013; +- *((unsigned long*)& __m128d_op0[0]) = 0x65017c2ac9ca9fd0; +- *((unsigned long*)& __m128d_op1[1]) = 0x00008bf700017052; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000f841000091aa; +- *((unsigned long*)& __m128d_result[1]) = 0xe93d0bd19ff07013; +- *((unsigned long*)& __m128d_result[0]) = 0x65017c2ac9ca9fd0; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xe93d0bd19ff07013; +- *((unsigned long*)& __m128d_op1[0]) = 0x65017c2ac9ca9fd0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_d(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xe93d0bd19ff07013; +- *((unsigned long*)& __m128d_op0[0]) = 0x65017c2ac9ca9fd0; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffcafff8ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000a0; +- __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; +- *((unsigned long*)& __m128i_op1[1]) = 0xe6d4572c8a5835bc; +- *((unsigned long*)& __m128i_op1[0]) = 0xe5017c2ac9ca9fd0; +- *((unsigned long*)& __m128i_result[1]) = 0x00d3012b015700bb; +- *((unsigned long*)& __m128i_result[0]) = 0x0001002affca0070; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00d3012b015700bb; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001002affca0070; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x00d3012b015700bb; +- *((unsigned long*)& __m128i_result[0]) = 0x00010000ffca0070; +- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffcafff8ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a0; +- *((unsigned long*)& __m128i_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00d3012b015700bb; +- *((unsigned long*)& __m128i_op1[0]) = 0x00010000ffca0070; +- *((unsigned long*)& __m128i_result[1]) = 0xff2cfed4fea8ff44; +- *((unsigned long*)& __m128i_result[0]) = 0xfffeffff0035ff8f; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffcafff8ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; +- *((unsigned long*)& __m128i_op1[1]) = 0xff2cfed4fea8ff44; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffeffff0035ff8f; +- *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000a0; +- __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x9c9c9c9c; +- *((int*)& __m128_op1[2]) = 0x9c9c9c9c; +- *((int*)& __m128_op1[1]) = 0x9c9c9c9c; +- *((int*)& __m128_op1[0]) = 0x9c9c9c9c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001ca02f854; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000100013fa0; +- __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000020202020; +- *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_result[0]) = 0x2020202020202020; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000900000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000900013fa0; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x23); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x80000000; +- *((int*)& __m256_op0[6]) = 0x80000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x80000000; +- *((int*)& __m256_op0[2]) = 0x80000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000001; +- *((int*)& __m128_op0[2]) = 0xca02f854; +- *((int*)& __m128_op0[1]) = 0x00000001; +- *((int*)& __m128_op0[0]) = 0x00013fa0; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0xca02f854; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfrint_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ca02f854; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000fea8ff44; +- *((unsigned long*)& __m128d_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128d_op1[0]) = 0x2020202020202020; +- *((unsigned long*)& __m128d_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128d_result[0]) = 0x2020202020202020; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ca02f854; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ca02f854; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ca0200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ca0200000000; +- __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xbff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xbff00000bff00000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xbff00000bff00000; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; 
+- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xbff00000bff00000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xbff00000bff00000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffbff1ffffbff1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffbff1ffffbff1; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x2020202020202020; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_result[0]) = 0x202020202020ff20; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001ca02f854; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x202020202020ff20; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x2000200020002000; +- *((unsigned long*)& __m128i_result[0]) = 0x2000200020002000; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fea8ff44; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fea8ff44; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000008000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128d_op0[0]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffbff1ffffbff1; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffbff1ffffbff1; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffeffc4000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffeffc4000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffeffc4000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffeffc4000000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& 
__m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001ca02f854; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_op1[1]) = 0x2000200020002000; +- *((unsigned long*)& __m128i_op1[0]) = 0x2000200020002000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000120002000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000004b01; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000004b01; +- *((unsigned long*)& __m128i_op1[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a0; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000004b01; +- __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& 
__m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000004b01; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffb4ff; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffb4ff; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffb4ff; +- __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000120002000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001021; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000401000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000401000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000401000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000401000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000200020; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000003f; +- __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000016; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffff98dea; +- __m128i_out = 
__lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000001021; +- *((unsigned long*)& __m128i_result[1]) = 0x0108020410400208; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010102; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128d_result[1]) = 0x3ff0000000000000; +- 
*((unsigned long*)& __m128d_result[0]) = 0x40f3fa0000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffff98dea; +- *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x40f3fa0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xc00fffffffffb4ff; +- *((unsigned long*)& __m128i_result[0]) = 0xbf0c05fffff98dea; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x2000200000013fa0; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000013fa0; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0020000000200000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffdfffffffdfffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffdfffffffdfffff; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvrotri_w(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0606060606060606; +- *((unsigned long*)& __m256i_result[2]) = 0x0606060606060606; +- *((unsigned long*)& __m256i_result[1]) = 0x0606060606060606; +- *((unsigned long*)& __m256i_result[0]) = 0x0606060606060606; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0606060606060606; +- *((unsigned long*)& __m256i_op1[2]) = 0x0606060606060606; +- *((unsigned long*)& __m256i_op1[1]) = 0x0606060606060606; +- *((unsigned long*)& __m256i_op1[0]) = 0x0606060606060606; +- *((unsigned long*)& __m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000120002000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_op1[1]) = 0x2000200000013fa0; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000013fa0; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000120002000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100013fa0; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000200020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffdfffffffdfffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffdfffffffdfffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000001021; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2000200000013fa0; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000001000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000120002000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000100013fa0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffdfffffffdfffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffdfffffffdfffff; +- *((unsigned long*)& __m256i_op1[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0020000000200001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0020000000200001; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x5); +- *((unsigned long*)& __m128i_op0[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001021; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00d3012acc56f9bb; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001021; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffe000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffe000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x54); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000; +- __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000006a9a5c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000092444; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000006a9a5c; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000092444; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000d4ccb8; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000124888; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00d4ccb8; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00124888; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffbd994889; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000a092444; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000890000000000; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x58); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffe000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffe000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000e000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000e000; +- __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffb4ff; +- *((unsigned long*)& __m128i_result[1]) = 0xc110000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xc00d060000000000; +- __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xda); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc110000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc00d060000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x40f3fa0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xf047ef0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xf047ef0000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xbd994889; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0a092444; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x3941248880000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x39412488; +- *((int*)& __m128_op0[0]) = 0x80000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x18171615; +- *((int*)& __m128_op0[2]) = 0x17161514; +- *((int*)& __m128_op0[1]) = 0x16151413; +- *((int*)& __m128_op0[0]) = 0x151d3756; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x39412488; +- *((int*)& __m128_op1[0]) = 0x80000000; +- *((int*)& __m128_op2[3]) = 0x3ff00000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x40f3fa00; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xbff00000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0xc0f3fa00; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xc0f3fa0080000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffec060; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3ff0008000800080; +- *((unsigned long*)& __m128i_result[0]) = 0x40f3fa8000800080; +- __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3941248880000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3941248880000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x40f3fa0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x76f4248880000000; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x76f42488; +- *((int*)& __m128_op0[0]) = 0x80000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00003fff00003fff; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000; +- __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff0000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff0000ff; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff0000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff0000ff; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc485edbcc0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x003f000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007c000d00400000; +- __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff0000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff0000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ff00000000ff; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xc110000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc00d060000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xc110000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff7fffffff; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc485edbcc0000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000c485; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x30); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x007c000d00400000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000003f00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000007c00000040; +- __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x31); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fd; +- 
__m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x18171615; +- *((int*)& __m128_op0[2]) = 0x17161514; +- *((int*)& __m128_op0[1]) = 0x16151413; +- *((int*)& __m128_op0[0]) = 0x15141312; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1817161517161514; +- *((unsigned long*)& __m128i_op0[0]) = 0x1615141315141312; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x76f424887fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000017161515; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000095141311; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[3]) = 0x0600060000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0600060000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff082f000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000f7d1000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x773324887fffffff; +- __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x059a35ef139a8e00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000017161515; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000095141311; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_d(__m128i_op0,0x34); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff0000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff0000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007f0200007f02; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f0200007f02; +- __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000002; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000002; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffff00000002; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffff00000002; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xa7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000002; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000002; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000002; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff082f000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003f000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000f7d1000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x773324887fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfff082efffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x88cbdb7780000001; +- __m128i_out = __lsx_vsub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x1817161517161514; +- *((unsigned long*)& __m128d_op1[0]) = 0x1615141315141312; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff082f000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc04d600d3aded151; +- *((unsigned long*)& __m128i_op1[0]) = 0xc000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x004cff8fffde0051; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000f7d1000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x773324887fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000017161515; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000095141311; +- *((unsigned long*)& __m128i_result[1]) = 0x000000017fffffff; +- 
*((unsigned long*)& __m128i_result[0]) = 0x1716151595141311; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004cff8fffde0051; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000100fe000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000100fe00010001; +- *((unsigned long*)& __m256i_result[1]) = 0x000100fe000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000100fe00010001; +- __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xb4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000001fdfffffe02; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fefe; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff01fefffeff02; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 
0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000001fdfffffe02; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000001fefe; +- *((unsigned long*)& __m256i_result[0]) = 0xffff01fefffeff02; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fff80fe; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fff80fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff80007ffe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ff007fff80fe; +- __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x18171615; +- *((int*)& __m128_op0[2]) = 0x17161514; +- *((int*)& __m128_op0[1]) = 0x16151413; +- *((int*)& __m128_op0[0]) = 0x15141312; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; +- *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; +- *((unsigned long*)& __m128i_result[1]) = 0x0c0c8b8a8b8b0b0a; +- *((unsigned long*)& __m128i_result[0]) = 0x8b8a8a898a8a8909; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000017161515; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000095141311; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x76f424887fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000170014; +- *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; +- __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000002; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000002; +- __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x76f424887fffffff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x3f800000; +- *((int*)& __m128_result[1]) = 0x4eede849; +- *((int*)& __m128_result[0]) = 0x4f000000; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000170014; +- *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0c0c8b8a8b8b0b0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x8b8a8a898a8a8909; +- *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; +- *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000007fff80fe; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000007fff80fe; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff80007ffe; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000ff007fff80fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000003f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4eede8494f000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1817161517161514; +- *((unsigned long*)& __m128i_op1[0]) = 0x1615141315141312; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff8607db959f; +- *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000008a0000008a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000008900000009; +- *((unsigned long*)& __m128i_result[1]) = 0x000000043c5ea7b6; +- *((unsigned long*)& __m128i_result[0]) = 0x00000008fc4ef7b4; +- __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000001fdfffffe02; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fefe; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff01fefffeff02; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fd00ffff02ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001fffeff; +- *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff; +- __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fd00ffff02ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fffeff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff81ffffff00; +- __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0000ffff; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000043c5ea7b6; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000008fc4ef7b4; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000fea0000fffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000007fff8; +- *((unsigned long*)& __m256i_result[1]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000007fff8; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffff8607db959f; +- *((unsigned long*)& __m128d_op0[0]) = 0xff0cff78ff96ff14; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000800; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff900000800; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- 
*((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000008a0000008a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000008900000009; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff00ffffffff; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000007fff8; +- *((unsigned long*)& __m256i_op1[1]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000007fff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0007fff8000ffff0; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000008a0000008a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000008900000009; +- *((unsigned long*)& __m128i_op1[1]) = 0x63637687636316bb; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x6363771163631745; +- *((unsigned long*)& __m128i_result[0]) = 0x636363ec6363636c; +- __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363771163631745; +- *((unsigned long*)& __m128i_op1[0]) = 0x636363ec6363636c; +- *((unsigned long*)& __m128i_result[1]) = 0x006300fb00630143; +- *((unsigned long*)& __m128i_result[0]) = 0x0063ffec0063006c; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff8607db959f; +- *((unsigned long*)& __m128i_op1[0]) = 0xff0cff78ff96ff14; +- *((unsigned long*)& __m128i_result[1]) = 0x00000fea0000fffe; +- *((unsigned long*)& __m128i_result[0]) = 0xff0cff78ff96ff14; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0xc2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xfffffff900000800; +- *((unsigned long*)& __m256i_result[3]) = 0x00007f7f00007f00; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00007f7f00007fff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x87); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000fea0000fffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xff0cff78ff96ff14; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000fd00ffff02fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op2[3]) = 0x00007f7f00007f00; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x00007f7f00007fff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0100; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00007f7f00007f00; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00007f7f00007fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000007fff8; +- *((unsigned long*)& __m256i_op1[1]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000007fff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f00ff00000000; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000c6c6ee22; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6c62e8a; +- *((unsigned long*)& __m128i_op1[1]) = 
0x00000000c6c6ee22; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c62e8a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffc000400780087; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fe80fffc0183; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffc000400f8ff87; +- *((unsigned long*)& __m256i_op0[0]) = 0xff80ff00ff7c0183; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff900000800; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffc00000078; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffc; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffc000000f8; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff790000077c; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000fd00ffff02ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff02ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0100; +- *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffff9cff05; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff9cfebd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xc0fffff000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffe0001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00003a247fff7fff; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000500000005; +- *((unsigned long*)& __m128i_result[0]) = 0x00000005fffe0006; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc0fffff000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xc0fffff000000000; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffffe02; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000300000005fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffff02; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000300000005fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0007fd00000f02ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fffeff; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ffffffff00; +- __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0001; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000000bf; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000002bb; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00080000fffe0001; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ffffffff00; +- *((unsigned long*)& __m256d_result[3]) = 0x40efffe000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x40efffe000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc0fffff000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffe00000; +- __m128i_out = 
__lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x63637687; +- *((int*)& __m128_op0[2]) = 0x636316bb; +- *((int*)& __m128_op0[1]) = 0x63636363; +- *((int*)& __m128_op0[0]) = 0x63636363; +- *((unsigned long*)& __m128d_result[1]) = 0x446c6ed0e0000000; +- *((unsigned long*)& __m128d_result[0]) = 0x446c62d760000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc0fffff000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- long_int_result = 0x00000000ffff0100; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; +- int_result = 0x0000000000003a24; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f00ff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0007fff8000ffff0; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000030007; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x40cd120000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0100; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ff7fff7f; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff7f027f; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff7f0100; +- *((unsigned long*)& __m256i_result[0]) = 0xff00fe00fe7f027f; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000ffffffff0100; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00fefffeff02ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0100; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000100; +- *((unsigned long*)& __m256i_result[0]) = 0xff00feff00000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0007fd00000f02ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fffeff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[3]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[2]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[1]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff02ff; +- __m256i_out = __lasx_xvreplve0_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; +- __m128i_out = __lsx_vreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_result[3]) = 0xff00fe00feff02fe; +- *((unsigned long*)& __m256i_result[2]) = 0xff00fe00feff027f; +- *((unsigned long*)& __m256i_result[1]) = 0xff00fe00feff02fe; +- *((unsigned long*)& __m256i_result[0]) = 0xff00fe00feff027f; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01fe0400000006; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000005fffa; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00fe01fc0005fff4; +- __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfbfbfb17fbfb38ea; +- *((unsigned long*)& __m128i_op0[0]) = 0xfbfb47fbfbfb0404; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000005fffa; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfbfbfb17fbfb38ea; +- *((unsigned long*)& __m128i_op0[0]) = 0xfbfb47fbfbfb0404; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000002f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000029; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff02ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffff0100; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00fefffeff02ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00030006fa05f20e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00030081bd80f90e; +- *((unsigned long*)& __m256i_result[3]) = 0x00007f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x00010003fc827a86; +- *((unsigned long*)& __m256i_result[1]) = 0x00007f7f7f7f0000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f017fc0ddbf7d86; +- __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op2[1]) = 0x000000000000002f; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000029; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01fe0400000006; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000500000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff01fe0400000005; +- __m128i_out = __lsx_vmini_w(__m128i_op0,5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00fe01fc0005fff4; +- int_op1 = 0x0000000020202020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000820202020; +- *((unsigned long*)& __m128i_result[0]) = 0x00fe01fc0005fff4; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000820202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x00fe01fc0005fff4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128i_op1[0]) = 0x003dbe88077c78c1; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000820205a44; +- *((unsigned long*)& __m128i_result[0]) = 0x013bc084078278b5; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029; +- *((unsigned long*)& __m128i_op1[1]) = 0xfbfbfb17fbfb38ea; +- *((unsigned long*)& __m128i_op1[0]) = 0xfbfb47fbfbfb0404; +- *((unsigned long*)& __m128i_result[1]) = 0xfbfbfb17fbfb3919; +- *((unsigned long*)& __m128i_result[0]) = 0xfbfb47fbfbfb042d; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000000000002f; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000029; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128d_op1[0]) = 0x003dbe88077c78c1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x40effc0000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x40effc0000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[2]) = 0x00010003fc827a86; +- *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f7f7f0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f017fc0ddbf7d86; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x00153f1594ea02ff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000ffffffff0100; +- *((unsigned long*)& __m256i_op2[0]) = 0xff15c1ea95ea02ff; +- *((unsigned long*)& __m256i_result[3]) = 0xc06e7c817f7e8081; +- *((unsigned long*)& __m256i_result[2]) = 0x0000bd3f016f177a; +- *((unsigned long*)& __m256i_result[1]) = 0xc06e7c8100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x60c485800178147a; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffbe20fc; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000001cc7ee87; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000010bb83239; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000c409ed87; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0100020001bf1efd; +- *((unsigned long*)& __m256i_result[2]) = 0x010002001ec8ec88; +- *((unsigned long*)& __m256i_result[1]) = 0x010002010db9303a; +- *((unsigned long*)& __m256i_result[0]) = 0x01000200c60aeb88; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00153f1594ea02ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffff0100; +- *((unsigned long*)& __m256i_op0[0]) = 0xff15c1ea95ea02ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00030006fa05f20e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00030081bd80f90e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000018; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000018; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_w_d(__m256i_op0,__m256i_op1,0x2d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000029; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0x010101010101012f; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010129; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x40efffe000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x40efffe000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00fe00feff02ff; +- *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f; +- __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffff00; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffd700; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x40efffe000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x40efffe000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op2[3]) = 
0x00000000ff7fff7f; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000ff7f027f; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000000ff7f0100; +- *((unsigned long*)& __m256i_op2[0]) = 0xff00fe00fe7f027f; +- *((unsigned long*)& __m256i_result[3]) = 0x40efffe09fa88260; +- *((unsigned long*)& __m256i_result[2]) = 0x6b07ca8e013fbf01; +- *((unsigned long*)& __m256i_result[1]) = 0x40efffe09fa7e358; +- *((unsigned long*)& __m256i_result[0]) = 0x80ce32be3e827f00; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000003f0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000030007; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00153f1594ea02ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffff0100; +- *((unsigned long*)& __m256i_op1[0]) = 0xff15c1ea95ea02ff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000153f15; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff15c1ea; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x9ff87f7f7f807f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x9ff87f7f7f807f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f; +- __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_result[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_result[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000018; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000018; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000018; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000018; +- *((unsigned long*)& __m256d_result[0]) 
= 0x0000000000000000; +- __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_result[3]) = 0x4ffc3f783fc040c0; +- *((unsigned long*)& __m256i_result[2]) = 0x3fc03f803fc040c0; +- *((unsigned long*)& __m256i_result[1]) = 0x4ffc3f783fc040c0; +- *((unsigned long*)& __m256i_result[0]) = 0x3fc03f803fc040c0; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x40efffe09fa88260; +- *((unsigned long*)& __m256i_op0[2]) = 0x6b07ca8e013fbf01; +- *((unsigned long*)& __m256i_op0[1]) = 0x40efffe09fa7e358; +- *((unsigned long*)& __m256i_op0[0]) = 0x80ce32be3e827f00; +- *((unsigned long*)& __m256d_result[3]) = 0x43d03bfff827ea21; +- *((unsigned long*)& __m256d_result[2]) = 0x43dac1f2a3804ff0; +- *((unsigned long*)& __m256d_result[1]) = 0x43d03bfff827e9f9; +- *((unsigned long*)& __m256d_result[0]) = 0x43e019c657c7d050; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000018; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000018; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff30000000b; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff3fffffff3; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff30000000b; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff3fffffff3; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000002f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000029; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128i_op2[0]) = 0x003dbe88077c78c1; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000002f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000029; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op0[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op0[0]) 
= 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffff30000000b; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff3fffffff3; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffff30000000b; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff3fffffff3; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[1]) = 0x9ff87ef07f7f817f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007f7f817f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007f7f817f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f807f007f7f817f; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007f7f817f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007f7f817f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f807f007f7f817f; +- *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f783fc040c0; +- *((unsigned long*)& __m256i_op1[2]) = 0x3fc03f803fc040c0; +- *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f783fc040c0; +- *((unsigned long*)& __m256i_op1[0]) = 0x3fc03f803fc040c0; +- *((unsigned long*)& __m256i_result[3]) = 0x0003fbfc0bfbfc03; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0003fbfc0bfbfc03; +- *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x2d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21; +- *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050; +- *((unsigned long*)& __m256i_result[3]) = 0xbc30c40107d915df; +- *((unsigned long*)& __m256i_result[2]) = 0xbc263e0e5c80b010; +- *((unsigned long*)& __m256i_result[1]) = 0xbc30c40107d91607; +- *((unsigned long*)& __m256i_result[0]) = 0xbc20e63aa8392fb0; +- __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128i_op0[0]) = 0x003dbe88077c78c1; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op2[0]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000003a24; +- *((unsigned long*)& __m128i_result[0]) = 0x003dc288077c7cc1; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000008; +- *((int*)& __m128_op0[1]) = 0x00200020; +- *((int*)& __m128_op0[0]) = 0x00200020; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f79d20bf257; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffec6f90604bf; +- *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f79d20bf257; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffec6f90604bf; +- *((unsigned long*)& __m256i_result[3]) = 0x4ffc3f79d20bf257; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffec6f90604bf; +- *((unsigned long*)& __m256i_result[1]) = 0x4ffc3f79d20bf257; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffec6f90604bf; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21; +- *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xe8001411edf9c0f8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xe80014fdf0e3e428; +- __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x009f00f8007e00f0; +- *((unsigned long*)& __m256i_result[2]) = 0x007f007f0081007f; +- *((unsigned long*)& __m256i_result[1]) = 0x009f00f8007e00f0; +- *((unsigned long*)& __m256i_result[0]) = 0x007f007f0081007f; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff7fff7f; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff7f027f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff7f0100; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00fe00fe7f027f; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000020000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000020000000; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x23); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x43d03bfff827ea21; +- *((unsigned long*)& __m256i_op0[2]) = 0x43dac1f2a3804ff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x43d03bfff827e9f9; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x43e019c657c7d050; +- *((unsigned long*)& __m256i_op1[3]) = 0x43d03bfff827ea21; +- *((unsigned long*)& __m256i_op1[2]) = 0x43dac1f2a3804ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x43d03bfff827e9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0x43e019c657c7d050; +- *((unsigned long*)& __m256i_result[3]) = 0x86ff76ffff4eff42; +- *((unsigned long*)& __m256i_result[2]) = 0x86ffffffffff9eff; +- *((unsigned long*)& __m256i_result[1]) = 0x86ff76ffff4effff; +- *((unsigned long*)& __m256i_result[0]) = 0x86ff32ffaeffffa0; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffef8; +- *((unsigned long*)& __m128i_result[0]) = 0xffdfffdfffdffee0; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x40efffe09fa88260; +- *((unsigned long*)& __m256i_op0[2]) = 0x6b07ca8e013fbf01; +- *((unsigned long*)& __m256i_op0[1]) = 0x40efffe09fa7e358; +- *((unsigned long*)& __m256i_op0[0]) = 0x80ce32be3e827f00; +- *((unsigned long*)& __m256i_op1[3]) = 0x86ff76ffff4eff42; +- *((unsigned long*)& __m256i_op1[2]) = 0x86ffffffffff9eff; +- *((unsigned long*)& __m256i_op1[1]) = 0x86ff76ffff4effff; +- *((unsigned long*)& __m256i_op1[0]) = 0x86ff32ffaeffffa0; +- *((unsigned long*)& __m256i_result[3]) = 0x223d76f09f3881ff; +- *((unsigned long*)& __m256i_result[2]) = 0x3870ca8d013e76a0; +- *((unsigned long*)& __m256i_result[1]) = 0x223d76f09f37e357; +- *((unsigned long*)& __m256i_result[0]) = 0x43ec0a1b2aba7ed0; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4ffc3f783fc040c0; +- *((unsigned long*)& __m256i_op0[2]) = 0x3fc03f803fc040c0; +- *((unsigned long*)& __m256i_op0[1]) = 0x4ffc3f783fc040c0; +- *((unsigned long*)& __m256i_op0[0]) = 0x3fc03f803fc040c0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000ffff0000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffefffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffef8; +- *((unsigned long*)& __m128i_op1[0]) = 0xffdfffdfffdffee0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffdfffdf; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffefffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffefffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffefefffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000018; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000019; +- *((unsigned long*)& __m256i_result[1]) = 0x000000200000001e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000019; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffefefffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128d_op1[0]) = 0x0400000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffefefffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xbc30c40108a45423; +- *((unsigned long*)& __m256i_op1[2]) = 0xbc263e0e5d00e69f; +- *((unsigned long*)& __m256i_op1[1]) = 0xbc30c40108a4544b; +- *((unsigned long*)& __m256i_op1[0]) = 0xbc20e63aa8b9663f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffdf; +- __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffdf; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000021; +- __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffefefffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffefefffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000021; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0; +- *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0; +- *((unsigned long*)& __m256i_result[3]) = 0xdec38a1061c87f01; +- *((unsigned long*)& __m256i_result[2]) = 0xc8903673ffc28a60; +- *((unsigned long*)& __m256i_result[1]) = 0xdec38a1061c91da9; +- *((unsigned long*)& __m256i_result[0]) = 0xbd14f6e5d6468230; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000018; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000019; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000200000001e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000019; +- *((unsigned long*)& __m256i_op1[3]) = 0x223d76f09f3881ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x3870ca8d013e76a0; +- *((unsigned long*)& __m256i_op1[1]) = 0x223d76f09f37e357; +- *((unsigned long*)& __m256i_op1[0]) = 0x43ec0a1b2aba7ed0; +- *((unsigned long*)& __m256i_result[3]) = 0x223d771060c77e19; +- *((unsigned long*)& __m256i_result[2]) = 0x3870caad013e76b9; +- *((unsigned long*)& __m256i_result[1]) = 0x223d771060c81cc7; +- *((unsigned long*)& __m256i_result[0]) = 0x43ec0a3b2aba7ee9; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000007f; +- __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000002; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000007f; +- *((int*)& __m256_op1[7]) = 0xfffffff3; +- *((int*)& __m256_op1[6]) = 0x0000000b; +- *((int*)& __m256_op1[5]) = 0xfffffff3; +- *((int*)& __m256_op1[4]) = 0xfffffff3; +- *((int*)& __m256_op1[3]) = 0xfffffff3; +- *((int*)& __m256_op1[2]) = 0x0000000b; +- *((int*)& __m256_op1[1]) = 0xfffffff3; +- *((int*)& __m256_op1[0]) = 0xfffffff3; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000018; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000019; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000200000001e; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000019; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0004000000030000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000400000003c000; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x33); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x009f00f8007e00f0; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f007f0081007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x009f00f8007e00f0; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f007f0081007f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0ea85f60984a8555; +- *((unsigned long*)& __m256i_op1[2]) = 0x00a21ef3246995f3; +- *((unsigned long*)& __m256i_op1[1]) = 0x1189ce8000fa14ed; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0e459089665f40f3; +- *((unsigned long*)& __m256i_result[3]) = 0x000100f800000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0020001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x000000f800000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0004000000000010; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0; +- *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffff8910ffff7e01; +- *((unsigned long*)& __m256i_result[2]) = 0xffff3573ffff8960; +- *((unsigned long*)& __m256i_result[1]) = 0xffff8910ffff1ca9; +- *((unsigned long*)& __m256i_result[0]) = 0xfffff5e5ffff8130; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffdfffdf; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffff30000000b; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffff3fffffff3; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffff30000000b; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffff3fffffff3; +- *((unsigned long*)& __m256i_op1[3]) = 0xbc30c40108a45423; +- *((unsigned long*)& __m256i_op1[2]) = 0xbc263e0e5d00e69f; +- *((unsigned long*)& __m256i_op1[1]) = 0xbc30c40108a4544b; +- *((unsigned long*)& __m256i_op1[0]) = 0xbc20e63aa8b9663f; +- *((unsigned long*)& __m256i_result[3]) = 0x71860bf35f0f9d81; +- *((unsigned long*)& __m256i_result[2]) = 0x720ed94a46f449ed; +- *((unsigned long*)& __m256i_result[1]) = 0x71860bf35f0f9f39; +- *((unsigned long*)& __m256i_result[0]) = 0x72544f0e6e95cecd; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x71860bf35f0f9d81; +- *((unsigned long*)& __m256i_op0[2]) = 0x720ed94a46f449ed; +- *((unsigned long*)& __m256i_op0[1]) = 0x71860bf35f0f9f39; +- *((unsigned long*)& __m256i_op0[0]) = 0x72544f0e6e95cecd; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff8910ffff7e01; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff3573ffff8960; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff8910ffff1ca9; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffff5e5ffff8130; +- *((unsigned long*)& __m256i_result[3]) = 0xffffcb423a587053; +- *((unsigned 
long*)& __m256i_result[2]) = 0x6d46f43e71141b81; +- *((unsigned long*)& __m256i_result[1]) = 0xffffcb423a584528; +- *((unsigned long*)& __m256i_result[0]) = 0x9bdf36c8d78158a1; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x223d76f09f3881ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x3870ca8d013e76a0; +- *((unsigned long*)& __m256i_op1[1]) = 0x223d76f09f37e357; +- *((unsigned long*)& __m256i_op1[0]) = 0x43ec0a1b2aba7ed0; +- *((unsigned long*)& __m256i_result[3]) = 0x111ebb784f9c4100; +- *((unsigned long*)& __m256i_result[2]) = 0x1c386546809f3b50; +- *((unsigned long*)& __m256i_result[1]) = 0x111ebb784f9bf1ac; +- *((unsigned long*)& __m256i_result[0]) = 0x21f6050d955d3f68; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfrint_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = 
__lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x7ff0000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000ffffffdfffdf; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffdfffdf; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbc74c3d108e05422; +- *((unsigned long*)& __m256i_op0[2]) = 0xbc1e3e6a5cace67c; +- *((unsigned long*)& __m256i_op0[1]) = 0xbc74c3d108e0544a; +- *((unsigned long*)& __m256i_op0[0]) = 0xbc18e696a86565f4; +- *((unsigned long*)& __m256i_op1[3]) = 0xbc74c3d108e05422; +- *((unsigned long*)& __m256i_op1[2]) = 0xbc1e3e6a5cace67c; +- *((unsigned long*)& __m256i_op1[1]) = 0xbc74c3d108e0544a; +- *((unsigned long*)& __m256i_op1[0]) = 0xbc18e696a86565f4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x48); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xa5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff8910ffff7e01; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff3573ffff8960; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff8910ffff1ca9; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffff5e5ffff8130; +- *((unsigned long*)& __m256i_result[3]) = 0xffff8910ffff7e01; +- *((unsigned long*)& __m256i_result[2]) = 0xffff3573ffff8960; +- *((unsigned long*)& __m256i_result[1]) = 0xffff8910ffff1ca9; +- *((unsigned long*)& __m256i_result[0]) = 0xfffff5e5ffff8130; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x223d76f09f3881ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x3870ca8d013e76a0; +- *((unsigned long*)& __m256i_op0[1]) = 0x223d76f09f37e357; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ec0a1b2aba7ed0; +- *((unsigned long*)& __m256i_result[3]) = 0x223d76f09f3881ff; +- *((unsigned long*)& __m256i_result[2]) = 0x3870ca9d013e76b0; +- *((unsigned long*)& __m256i_result[1]) = 0x223d76f09f37e357; +- *((unsigned long*)& __m256i_result[0]) = 0x43ec0a1b2aba7ed0; +- __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffcb423a587053; +- *((unsigned long*)& __m256i_op0[2]) = 0x6d46f43e71141b81; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffcb423a584528; +- *((unsigned long*)& __m256i_op0[0]) = 0x9bdf36c8d78158a1; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000007fffe; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000036a37; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000007fffe; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000004def9; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x2d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffbfffffffbf; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffcb423a587053; +- *((unsigned long*)& __m256d_op0[2]) = 0x6d46f43e71141b81; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffcb423a584528; +- *((unsigned long*)& __m256d_op0[0]) = 0x9bdf36c8d78158a1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x223d76f0; +- *((int*)& __m256_op0[6]) = 0x9f3881ff; +- *((int*)& __m256_op0[5]) = 0x3870ca8d; +- *((int*)& __m256_op0[4]) = 0x013e76a0; +- *((int*)& __m256_op0[3]) = 0x223d76f0; +- *((int*)& __m256_op0[2]) = 0x9f37e357; +- *((int*)& __m256_op0[1]) = 0x43ec0a1b; +- *((int*)& __m256_op0[0]) = 0x2aba7ed0; +- *((int*)& __m256_op1[7]) = 0x111ebb78; +- *((int*)& __m256_op1[6]) = 0x4f9c4100; +- *((int*)& __m256_op1[5]) = 0x1c386546; +- *((int*)& __m256_op1[4]) = 
0x809f3b50; +- *((int*)& __m256_op1[3]) = 0x111ebb78; +- *((int*)& __m256_op1[2]) = 0x4f9bf1ac; +- *((int*)& __m256_op1[1]) = 0x21f6050d; +- *((int*)& __m256_op1[0]) = 0x955d3f68; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x111ebb784f9c4100; +- *((unsigned long*)& __m256i_op1[2]) = 0x1c386546809f3b50; +- *((unsigned long*)& __m256i_op1[1]) = 0x111ebb784f9bf1ac; +- *((unsigned long*)& __m256i_op1[0]) = 0x21f6050d955d3f68; +- *((unsigned long*)& __m256i_result[3]) = 0x088f5dbc27ce2080; +- *((unsigned long*)& __m256i_result[2]) = 0x161c32a2c04f9da7; +- *((unsigned long*)& __m256i_result[1]) = 0x088f5dbc27cdf8d6; +- *((unsigned long*)& __m256i_result[0]) = 0x10fb02864aae9fb4; +- __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x111ebb78; +- *((int*)& __m256_op1[6]) = 0x4f9c4100; +- *((int*)& __m256_op1[5]) = 0x1c386546; +- *((int*)& __m256_op1[4]) = 0x809f3b50; +- *((int*)& __m256_op1[3]) = 0x111ebb78; +- *((int*)& __m256_op1[2]) = 0x4f9bf1ac; +- *((int*)& __m256_op1[1]) = 0x21f6050d; +- *((int*)& __m256_op1[0]) = 0x955d3f68; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op1[1]) = 0xff7ffffef77fffdd; +- *((unsigned long*)& __m128i_op1[0]) = 0xf77edf9cffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) 
= 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvabsd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x111ebb784f9c4100; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c386546809f3b50; +- *((unsigned long*)& __m256i_op0[1]) = 0x111ebb784f9bf1ac; +- *((unsigned long*)& __m256i_op0[0]) = 0x21f6050d955d3f68; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xbab0c4b000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xaa0ac09800000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000007fffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000036a37; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000007fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000004def9; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff7ffffef77fffdd; +- *((unsigned long*)& __m128i_op1[0]) = 0xf77edf9cffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000008800022; +- *((unsigned long*)& __m128i_result[0]) = 
0xffffffff00000001; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x29); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xb8ec43be; +- *((int*)& __m128_op1[2]) = 0xfe38e64b; +- *((int*)& __m128_op1[1]) = 0x6477d042; +- *((int*)& __m128_op1[0]) = 0x343cce24; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000008800022; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffff00000001; +- *((unsigned long*)& __m128d_op2[1]) = 0xb8ec43befe38e64b; +- *((unsigned long*)& __m128d_op2[0]) = 0x6477d042343cce24; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffbfffffffbf; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0097011900f4009f; +- *((unsigned long*)& __m256i_op0[2]) = 0x003200d4010f0144; +- *((unsigned long*)& __m256i_op0[1]) = 0x0097011900f301cd; +- *((unsigned long*)& __m256i_op0[0]) = 0x010b008800f80153; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4ffc3f7800000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3fc03f6400000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x4ffc3f7800000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3fc03f6400000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x4eb13ec100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3ec13ec100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x4eb13ec100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3ec13ec100000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,-12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001ffffff7f; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x5f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0004040404000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0004040404000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0004040404000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0004040404000000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf; +- long_op1 = 0x0000000000003a24; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000003a24; +- __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffbfffffffbe; +- __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x4ffc3f7800000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x3fc03f6400000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x4ffc3f7800000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x3fc03f6400000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000050fd00000101; +- *((unsigned long*)& __m256i_result[2]) = 0x000040c100000101; +- *((unsigned long*)& __m256i_result[1]) = 0x000050fd00000101; +- *((unsigned long*)& __m256i_result[0]) = 0x000040c100000101; +- __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0004040404000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0004040404000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0004040404000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0004040404000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000050fd00000101; +- *((unsigned long*)& __m256i_op1[2]) = 0x000040c100000101; +- *((unsigned long*)& __m256i_op1[1]) = 0x000050fd00000101; +- *((unsigned long*)& __m256i_op1[0]) = 0x000040c100000101; +- *((unsigned long*)& __m256i_result[3]) = 0x000050fd00000101; +- *((unsigned long*)& __m256i_result[2]) = 0x000040c100000101; +- *((unsigned long*)& __m256i_result[1]) = 0x000050fd00000101; +- *((unsigned long*)& __m256i_result[0]) = 0x000040c100000101; +- __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff7e00000081; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000008000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff5fffffff5; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff5fffffff5; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff5fffffff5; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff5fffffff5; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff7e00000081; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128d_op0[1]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffbfffffffbf; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0404000004040000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op0[1]) = 0x0404000004040000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op1[3]) = 0x8011ffee804c004c; +- *((unsigned long*)& __m256d_op1[2]) = 0x00faff0500c3ff3c; +- *((unsigned long*)& __m256d_op1[1]) = 0x80f900f980780078; +- *((unsigned long*)& __m256d_op1[0]) = 0x0057ffa800ceff31; +- *((unsigned long*)& __m256d_op2[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256d_op2[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256d_op2[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256d_op2[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256d_result[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256d_result[2]) = 0x80003fc00000428a; +- *((unsigned long*)& __m256d_result[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256d_result[0]) = 0x80003fc00000428a; +- __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m256_op0[7]) = 0x01010101; +- *((int*)& __m256_op0[6]) = 0x01010101; +- *((int*)& __m256_op0[5]) = 0x01010101; +- *((int*)& __m256_op0[4]) = 0x01010101; +- *((int*)& __m256_op0[3]) = 0x01010101; +- *((int*)& __m256_op0[2]) = 0x01010101; +- *((int*)& __m256_op0[1]) = 0x01010101; +- *((int*)& __m256_op0[0]) = 0x01010101; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vdiv_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff0000007f800000; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; +- *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; +- *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; +- *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffee0000004c0000; +- *((unsigned long*)& __m256i_result[2]) = 0xff050000ff3c0000; +- *((unsigned long*)& __m256i_result[1]) = 0x00f9000000780000; +- *((unsigned long*)& __m256i_result[0]) = 0xffa80000ff310000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffee0000004c0000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff050000ff3c0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00f9000000780000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffa80000ff310000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffbfc0ffffbfc0; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffbfc0ffffbfc0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000032; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8011ffee804c004c; +- *((unsigned long*)& __m256i_op1[2]) = 0x00faff0500c3ff3c; +- *((unsigned long*)& __m256i_op1[1]) = 0x80f900f980780078; +- *((unsigned long*)& __m256i_op1[0]) = 0x0057ffa800ceff31; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[2]) = 0xff000000ff000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m256i_result[0]) = 0xff000000ff000000; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; +- *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; +- *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; +- *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_op1[2]) = 0xff000000ff000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m256i_op1[0]) = 0xff000000ff000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffee0000ff4c; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ff050000ff3c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fff90000ff78; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffa80000ff31; +- __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; +- *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; +- *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; +- *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffee0000ff4c; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ff050000ff3c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fff90000ff78; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffa80000ff31; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffee0000ff4c; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ff050000ff3c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fff90000ff78; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffa80000ff31; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040; +- __m128i_out = 
__lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; +- *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; +- *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; +- *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; +- *((unsigned long*)& __m256i_result[3]) = 0x8011ffae800c000c; +- *((unsigned long*)& __m256i_result[2]) = 0x00baff050083ff3c; +- *((unsigned long*)& __m256i_result[1]) = 0x80b900b980380038; +- *((unsigned long*)& __m256i_result[0]) = 0x0017ffa8008eff31; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op2[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op2[0]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_result[1]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0fff0fff0fff0fff; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8011ffee804c004c; +- *((unsigned long*)& __m256i_op0[2]) = 0x00faff0500c3ff3c; +- *((unsigned long*)& __m256i_op0[1]) = 0x80f900f980780078; +- *((unsigned long*)& __m256i_op0[0]) = 0x0057ffa800ceff31; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff000000010000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000032; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000032; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_op0[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff000000010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8011ffae800c000c; +- *((unsigned long*)& __m256i_op1[2]) = 0x00baff050083ff3c; +- *((unsigned long*)& __m256i_op1[1]) = 0x80b900b980380038; +- *((unsigned long*)& __m256i_op1[0]) = 0x0017ffa8008eff31; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff800c000c; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000084ff3c; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff80380038; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000008fff31; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffee0000ff4c; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000ff050000ff3c; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000fff90000ff78; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000ffa80000ff31; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffff0000; +- *((int*)& __m256_op0[4]) = 0xffff0000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffff0000; +- *((int*)& __m256_op0[0]) = 0xffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0b085bfc00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0b004bc000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0b085bfc00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0b004bc000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0404010008080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0408010008080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0404010008080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0408010008080808; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8011ffae800c000c; +- *((unsigned long*)& __m256i_op0[2]) = 0x00baff050083ff3c; +- *((unsigned long*)& __m256i_op0[1]) = 0x80b900b980380038; +- *((unsigned long*)& __m256i_op0[0]) = 0x0017ffa8008eff31; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d; +- *((unsigned 
long*)& __m256i_op1[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001010001; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0fff0fff0fff0fff; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000ff00; +- *((int*)& __m128_op1[3]) = 0x40404040; +- *((int*)& __m128_op1[2]) = 0x40404040; +- *((int*)& __m128_op1[1]) = 0x40404040; +- *((int*)& __m128_op1[0]) = 0x40404040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffee0000ff4c; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ff050000ff3c; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000fff90000ff78; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffa80000ff31; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0b085bfc00000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0b004bc000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0b085bfc00000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0b004bc000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0fff0fff7f800fff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xf001f0010101f002; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x35); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000010; +- *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0404010008080808; +- *((unsigned long*)& __m256i_op0[2]) = 0x0408010008080808; +- *((unsigned long*)& __m256i_op0[1]) = 0x0404010008080808; +- *((unsigned long*)& __m256i_op0[0]) = 0x0408010008080808; +- *((int*)& __m256_result[7]) = 0x38808000; +- *((int*)& __m256_result[6]) = 0x37800000; +- *((int*)& __m256_result[5]) = 0x39010000; +- *((int*)& __m256_result[4]) = 0x39010000; +- *((int*)& __m256_result[3]) = 0x38808000; +- *((int*)& __m256_result[2]) = 0x37800000; +- *((int*)& __m256_result[1]) = 0x39010000; +- *((int*)& __m256_result[0]) = 0x39010000; +- __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3880800037800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3901000039010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3880800037800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3901000039010000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_result[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_result[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_result[0]) = 0x00003fc00000428a; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0006ffff0004ffff; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0002ffff0000ffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffff7f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002fffefffd0001; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe; +- __m128i_out = __lsx_vmini_h(__m128i_op0,2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- int_result = 0xffffffffffffffff; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xe6e6e6e6e6e6e6e6; +- *((unsigned long*)& __m128i_result[0]) = 0xe6e6e6e6e6e6e6e6; +- __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xf001f0010101f002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0404010008080808; +- *((unsigned long*)& __m256i_op0[2]) = 0x0408010008080808; +- *((unsigned long*)& __m256i_op0[1]) = 0x0404010008080808; +- *((unsigned long*)& __m256i_op0[0]) = 0x0408010008080808; +- *((unsigned long*)& __m256i_result[3]) = 0x0505070804040404; +- *((unsigned long*)& __m256i_result[2]) = 0x0504070804040404; +- *((unsigned long*)& __m256i_result[1]) = 0x0505070804040404; +- *((unsigned long*)& __m256i_result[0]) = 0x0504070804040404; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_op1[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_result[1]) = 0x1202120212021202; +- *((unsigned long*)& __m128i_result[0]) = 0x1202120212021202; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002fffefffd0001; +- *((unsigned long*)& __m128i_op1[1]) = 0x1202120212021202; +- *((unsigned long*)& __m128i_op1[0]) = 0x1202120212021202; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_result[0]) = 0x0202fe02fd020102; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ff000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ff000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1202120212021202; +- *((unsigned long*)& __m128i_op1[0]) = 0x1202120212021202; +- *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; +- __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0505070804040404; +- *((unsigned long*)& __m256i_op0[2]) = 0x0504070804040404; +- *((unsigned long*)& __m256i_op0[1]) = 0x0505070804040404; +- *((unsigned long*)& __m256i_op0[0]) = 0x0504070804040404; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0283038402020202; +- *((unsigned long*)& __m256i_result[2]) = 0x0282038402020202; +- *((unsigned long*)& __m256i_result[1]) = 0x0283038402020202; +- *((unsigned long*)& __m256i_result[0]) = 0x0282038402020202; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0202fe02fd020102; +- *((unsigned long*)& __m128i_result[1]) = 0xfefcfefcfefcfefc; +- *((unsigned long*)& __m128i_result[0]) = 0xfcfc00fc01fcfdfc; +- __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf001f0010101f002; +- *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0202fe02fd020102; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000202fe02; +- 
__m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x78); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0505070804040404; +- *((unsigned long*)& __m256i_op0[2]) = 0x0504070804040404; +- *((unsigned long*)& __m256i_op0[1]) = 0x0505070804040404; +- *((unsigned long*)& __m256i_op0[0]) = 0x0504070804040404; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ff000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ff000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0504080804030405; +- *((unsigned long*)& __m256i_result[2]) = 0x0504060904040305; +- *((unsigned long*)& __m256i_result[1]) = 0x0504080804030405; +- *((unsigned long*)& __m256i_result[0]) = 0x0504060904040305; +- __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfffefffe; +- *((int*)& __m128_op0[2]) = 0xfffefffe; +- *((int*)& __m128_op0[1]) = 0xfffefffe; +- *((int*)& __m128_op0[0]) = 0xfffefffe; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xf001f001; +- *((int*)& __m128_op1[0]) = 0x0101f002; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0202fe02fd020102; +- *((unsigned long*)& __m128i_result[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[0]) = 0x0400040004000400; +- __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000202fe02; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff00ff; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0504080804030405; +- *((unsigned long*)& __m256i_op0[2]) = 0x0504060904040305; +- *((unsigned long*)& __m256i_op0[1]) = 0x0504080804030405; +- *((unsigned long*)& __m256i_op0[0]) = 0x0504060904040305; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000141020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000141020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x66); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x1000100012030e02; +- *((unsigned long*)& __m128i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m128i_result[0]) = 0xfefefefefefefefe; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000202fe02; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op1[0]) = 0xffff00fc0000ff02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000ff00; +- *((int*)& __m256_op0[6]) = 0x0000ffff; +- *((int*)& __m256_op0[5]) = 0x000000ff; +- *((int*)& __m256_op0[4]) = 0x000000ff; +- *((int*)& __m256_op0[3]) = 0x0000ff00; +- *((int*)& __m256_op0[2]) = 0x0000ffff; +- *((int*)& __m256_op0[1]) = 0x000000ff; +- *((int*)& __m256_op0[0]) = 0x000000ff; +- *((int*)& __m256_op1[7]) = 0x0000ffee; +- *((int*)& __m256_op1[6]) = 0x0000ff4c; +- *((int*)& __m256_op1[5]) = 0x0000ff05; +- *((int*)& __m256_op1[4]) = 0x0000ff3c; +- *((int*)& __m256_op1[3]) = 0x0000fff9; +- *((int*)& __m256_op1[2]) = 0x0000ff78; +- *((int*)& __m256_op1[1]) = 0x0000ffa8; +- *((int*)& __m256_op1[0]) = 0x0000ff31; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff00fc0000ff02; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xff01ff040000fffe; +- __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000202fe02; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000101; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff3c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff31; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x5e5e5e5e5e5e5e1c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x5e5e5e5e5e5e5e10; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x5e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fffffeff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab; +- *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_op1[0]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000fffffeff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000009ffffff08; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab; +- *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8; +- *((unsigned long*)& __m128i_result[1]) = 0x55aa55c355aa55c4; +- *((unsigned long*)& __m128i_result[0]) = 0xaa55556f55aaaac1; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000141020; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000141020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_result[2]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_result[1]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_result[0]) = 0x1020102010201020; +- __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffcfffcfffc00fd; +- *((unsigned long*)& __m128i_result[0]) = 0xfffcfffcfffcfffc; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_h(__m128i_op0,-16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_op0[2]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_op0[1]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_op0[0]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_result[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xefdfefdfefdfefdf; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x55aa55aa55aa55ab; +- *((unsigned long*)& __m128i_op0[0]) = 0xaa55555655aaaaa8; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ef4002d21fc7001; +- *((unsigned long*)& __m128i_op1[0]) = 0x28bf02d1ec6a35b2; +- *((unsigned long*)& __m128i_result[1]) = 0x2a7b7c9260f90ee2; +- *((unsigned long*)& __m128i_result[0]) = 0x1b1c6cdfd57f5736; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fc00000428a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x6c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_op0[2]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_op0[1]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_op0[0]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffc040ffffc09d; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op2[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_result[3]) = 0x1031146010201020; +- *((unsigned long*)& __m256i_result[2]) = 0x1020102010201020; +- *((unsigned long*)& __m256i_result[1]) = 0x1031146010201020; +- *((unsigned long*)& __m256i_result[0]) = 0x1020102010201020; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x2c2c2c2c2c2c2c2c; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x2c2c2c2c2c2c2c2c; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x2c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128d_op0[0]) 
= 0x1000100010001000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128d_result[0]) = 0x1000100010001000; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x55aa55c3d5aa55c4; +- *((unsigned long*)& __m128i_op0[0]) = 0xaa55556fd5aaaac1; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000c; +- *((unsigned long*)& __m128i_result[0]) = 0xaa55556fd5aaaac1; +- __m128i_out = __lsx_vmini_d(__m128i_op0,12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2c2c2c2c2c2c2c2c; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x2c2c2c2c2c2c2c2c; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0d0d0d0d00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0d0d0d0d00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[3]) = 0x02407a3c00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0d0cf2f30d0cf2f3; +- *((unsigned long*)& __m256i_result[1]) = 0x02407a3c00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0d0cf2f30d0cf2f3; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x86); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x2a7b7c9260f90ee2; +- *((unsigned long*)& __m128i_op1[0]) = 0x1b1c6cdfd57f5736; +- *((unsigned long*)& __m128i_result[1]) = 0x153e3e49307d0771; +- *((unsigned long*)& __m128i_result[0]) = 0x0d8e36706ac02b9b; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x153e3e49; +- *((int*)& __m128_op0[2]) = 0x307d0771; +- *((int*)& __m128_op0[1]) = 0x0d8e3670; +- *((int*)& __m128_op0[0]) = 0x6ac02b9b; +- *((int*)& __m128_op1[3]) = 0x55aa55c3; +- *((int*)& __m128_op1[2]) = 0xd5aa55c4; +- *((int*)& __m128_op1[1]) = 0xaa55556f; +- *((int*)& __m128_op1[0]) = 0xd5aaaac1; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000100000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x1000100000001000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000100000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x1000100000001000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x02407a3c00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0d0cf2f30d0cf2f3; +- *((unsigned long*)& __m256i_op0[1]) = 0x02407a3c00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0d0cf2f30d0cf2f3; +- *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010100000100000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100000101000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000000010; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xefdfefdf; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xefdfefdf; +- *((int*)& __m256_op1[4]) = 0xefdfefdf; +- *((int*)& __m256_op1[3]) = 0xefdfefdf; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xefdfefdf; +- *((int*)& __m256_op1[0]) = 0xefdfefdf; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010001000000010; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000080000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x11); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffee0000ff4c; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ff050000ff3c; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000fff90000ff78; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffa80000ff31; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfff0fff0ff01ff01; +- *((unsigned long*)& __m256i_result[2]) = 0xfff0fff0fff0fff0; +- *((unsigned long*)& __m256i_result[1]) = 0xfff0fff0ff01ff01; +- *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff0fff0; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80000000307d0771; +- *((unsigned long*)& __m128i_op0[0]) = 0x0d8e36706ac02b9b; +- *((unsigned long*)& __m128i_op1[1]) = 0x80000000307d0771; +- *((unsigned long*)& __m128i_op1[0]) = 0x0d8e36706ac02b9b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010100000100000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100000101000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010100000100000; +- *((unsigned long*)& __m128i_result[0]) = 0x1000100000101000; +- __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff80df00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f7f; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256d_op0[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000dfa6e0c6; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x00000000d46cdc13; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000d46cdc13; +- __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op0[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_result[3]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_result[2]) = 0xdbcbdbcbdbcbdbcb; +- *((unsigned long*)& __m256i_result[1]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_result[0]) = 0xdbcbdbcbdbcbdbcb; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xa5c4c774; +- *((int*)& __m128_op0[2]) = 0x856ba83b; +- *((int*)& __m128_op0[1]) = 0x8003caef; +- *((int*)& __m128_op0[0]) = 0x54691124; +- *((unsigned long*)& __m128i_result[1]) = 0xbf800000bf800000; +- *((unsigned long*)& __m128i_result[0]) = 0xbf80000054691124; +- __m128i_out = __lsx_vfrintrm_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfff0fff0; +- *((int*)& __m256_op0[6]) = 0xff01ff01; +- *((int*)& __m256_op0[5]) = 0xfff0fff0; +- *((int*)& __m256_op0[4]) = 0xfff0fff0; +- *((int*)& __m256_op0[3]) = 0xfff0fff0; +- *((int*)& __m256_op0[2]) = 0xff01ff01; +- *((int*)& __m256_op0[1]) = 0xfff0fff0; +- *((int*)& __m256_op0[0]) = 0xfff0fff0; +- *((int*)& __m256_op1[7]) = 0xffefffef; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xffefffef; +- *((int*)& __m256_op1[4]) = 0xffefffef; +- *((int*)& __m256_op1[3]) = 0xffefffef; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xffefffef; +- *((int*)& __m256_op1[0]) = 0xffefffef; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_op1[3]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op1[1]) = 0xefdfefdf00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xefdfefdfefdfefdf; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef; +- __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x00000000d46cdc13; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000060000000; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff80df00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xa5c4c774856ba837; +- *((unsigned long*)& __m128i_op1[0]) = 0x2a569f8081c3bbe9; +- *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xd46cdc13; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0x7fc00000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ef4002d21fc7001; +- *((unsigned long*)& __m128i_op0[0]) = 0x28bf02d1ec6a35b2; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417; +- *((unsigned long*)& __m128i_op2[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op2[0]) = 0xff8000007fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x7ef400ad21fc7081; +- *((unsigned long*)& __m128i_result[0]) = 0x28bf0351ec69b5f2; +- __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff6080ffff4417; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0; +- *((unsigned long*)& __m256i_result[3]) = 0xfff0fff0ff01ff14; +- *((unsigned long*)& __m256i_result[2]) = 
0xfff0fff0fff10003; +- *((unsigned long*)& __m256i_result[1]) = 0xfff0fff0ff01ff14; +- *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff10003; +- __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ef400ad21fc7081; +- *((unsigned long*)& __m128i_op0[0]) = 0x28bf0351ec69b5f2; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417; +- *((unsigned long*)& __m128i_result[1]) = 0x7ef3ddac21fc5a2c; +- *((unsigned long*)& __m128i_result[0]) = 0x28bee9edec690869; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff14; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff10003; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff14; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff10003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfefee0e3fefefe00; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfefee0e3fefefe00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x7ef400ad; +- *((int*)& __m128_op0[2]) = 0x21fc7081; +- *((int*)& __m128_op0[1]) = 0x28bf0351; +- *((int*)& __m128_op0[0]) = 0xec69b5f2; +- *((int*)& __m128_op1[3]) = 0xff800000; +- *((int*)& __m128_op1[2]) = 0xff800000; +- *((int*)& __m128_op1[1]) = 0xff800000; +- *((int*)& __m128_op1[0]) = 0x7fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xdfa6e0c6; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xd46cdc13; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000002c002400; +- 
*((unsigned long*)& __m128d_op1[1]) = 0x7ef400ad21fc7081; +- *((unsigned long*)& __m128d_op1[0]) = 0x28bf0351ec69b5f2; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0fff0ff01ff01; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fff0; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0fff0ff01ff01; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fff0; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff0; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ef400ad21fc7081; +- *((unsigned long*)& __m128i_op1[0]) = 0x28bf0351ec69b5f2; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ad00007081; +- *((unsigned long*)& __m128i_result[0]) = 0x000003510000b5f2; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ef400ad21fc7081; +- *((unsigned long*)& __m128i_op1[0]) = 0x28bf0351ec69b5f2; +- *((unsigned long*)& __m128i_result[1]) = 0xdfa6e0c6d46cdc13; +- *((unsigned long*)& __m128i_result[0]) = 0x21fc7081ec69b5f2; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xdfa6e0c6d46cdc13; +- *((unsigned long*)& __m128i_op0[0]) = 0x21fc7081ec69b5f2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002c002400; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_op2[0]) = 0xffff6080ffff4417; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; +- *((unsigned long*)& __m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; +- __m128i_out = __lsx_vmaxi_bu(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffb96b; +- *((int*)& __m128_op0[2]) = 0xffff57c9; +- *((int*)& __m128_op0[1]) = 0xffff6080; +- *((int*)& __m128_op0[0]) = 0xffff4417; +- *((unsigned long*)& __m128i_result[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_result[0]) = 0xffff6080ffff4417; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x000000ad; +- *((int*)& __m128_op0[2]) = 0x00007081; +- *((int*)& __m128_op0[1]) = 0x00000351; +- *((int*)& __m128_op0[0]) = 0x0000b5f2; +- 
*((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfrint_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x7f800000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ff0000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff0000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ff00; +- __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff6080ffff4417; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_w(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000dfa6e0c6; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000d46cdc13; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x64); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_op0[1]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00ff0000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00ff0000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[3]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffefffefffefffef; +- *((unsigned long*)& __m256i_result[1]) = 0xffefffef00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffefffefffefffef; +- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfefee0e3; +- *((int*)& __m256_op0[6]) = 0xfefefe00; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xfefee0e3; +- *((int*)& __m256_op0[2]) = 0xfefefe00; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- unsigned_int_result = 0x00000000000000ff; +- unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0x9); +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f80000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x800080007f008000; +- __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0a0a0a0a0a0a0a0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0a0a0a0a0a0a0a0a; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffb96bffff57c9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff6080ffff4417; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0a0aa9890a0ac5f3; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x24342434ffff2435; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x24342434ffff2435; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x24342434ffff2435; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x24342434ffff2435; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x800080007f008000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a0aa9890a0ac5f3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffff000; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffffffffffff000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000060000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfffffffffffff000; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d1c1b1a; +- *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffff000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffff000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op0[2]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_op0[1]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op0[0]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2535253514141414; +- *((unsigned long*)& __m256i_result[2]) = 0x2535253500002535; +- *((unsigned long*)& __m256i_result[1]) = 0x2535253514141414; +- *((unsigned long*)& __m256i_result[0]) = 0x2535253500002535; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvslti_b(__m256i_op0,0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000fe; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op1[2]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_op1[1]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op1[0]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000080000001000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000080000001000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000080000001000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000080000001000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op0[2]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_op0[1]) = 0xdbcbdbcbecececec; +- *((unsigned long*)& __m256i_op0[0]) = 0xdbcbdbcb0000dbcb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvslei_wu(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000f0000000f000; +- __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x009c3e201e39e7e3; +- *((unsigned long*)& __m256i_op0[2]) = 0x87c1135043408bba; +- *((unsigned long*)& __m256i_op0[1]) = 0x009c3e201e39e7e3; +- *((unsigned long*)& __m256i_op0[0]) = 0x87c1135043408bba; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000f0000000f000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x35); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0010000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0010000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0008000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0010000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0010000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0008000000000000; +- __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001fffff001fffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x001fffff001fffff; +- *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff; +- *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x3b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d1c1b1a; +- *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; +- *((unsigned long*)& __m128i_result[1]) = 0x01203f1e3d1c3b1a; +- *((unsigned long*)& __m128i_result[0]) = 0x3918371635143312; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000f0000000f000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x001fffff001fffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x001fffff001fffff; +- *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff; +- *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x6363636363636363; +- *((unsigned long*)& __m128i_result[0]) = 0x6363636363636363; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x01203f1e3d1c3b1a; +- *((unsigned long*)& __m128d_op0[0]) = 0x3918371635143312; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000af555555555; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000af555555555; +- *((unsigned long*)& __m128d_result[1]) = 0x01203f1e3d1c3b1a; +- *((unsigned long*)& __m128d_result[0]) = 0x3918371635143312; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xcd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001fffff001fffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x001fffff001fffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d1c1b1a; +- *((unsigned long*)& __m128i_op1[0]) = 0x1918171615141312; +- *((unsigned long*)& __m128i_result[1]) = 0x10ff10ff10ff10ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_result[2]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_result[1]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_result[0]) = 0x0010000f0000000f; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; +- *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; +- *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d1c1b1a; +- *((unsigned long*)& __m128i_op1[0]) = 0x1918171615141312; +- *((unsigned long*)& __m128i_result[1]) = 0x480f7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x3e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; +- *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x21201f1e19181716; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- 
__m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000af555555555; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000af555555555; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000af5; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000af5; +- __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; +- *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; +- *((unsigned long*)& __m128i_result[1]) = 0x21201f1e1d001b25; +- *((unsigned long*)& __m128i_result[0]) = 0x191817161514131d; +- __m128i_out = __lsx_vaddi_du(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0003000900050007; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; +- *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; +- *((unsigned long*)& __m128i_op1[1]) = 0x21201f1e1d001b25; +- *((unsigned long*)& __m128i_op1[0]) = 0x191817161514131d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001e8e1d8; +- *((unsigned long*)& __m128i_result[0]) = 0x000000e400000001; +- __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001e8e1d8; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000e400000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000001e8e1d8; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000e400000001; +- *((unsigned long*)& __m128i_result[1]) 
= 0x000000000000e4e4; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000101; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000109000000c9; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x01203f1e3d1c3b1a; +- *((unsigned long*)& __m128i_op0[0]) = 0x3918371635143312; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000001d5d4; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000150d707009; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x03f1e3d28b1a8a1a; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0010000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1e0000001e002000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1e0000001e002000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x03f1e3d28b1a8a1a; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000001d5d4; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000150d707009; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fffe2a2c; +- *((unsigned long*)& __m128i_result[0]) = 0x03f1e3bd80000000; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e1d001b1a; +- *((unsigned long*)& __m128i_op0[0]) = 0x1918171615141312; +- *((unsigned long*)& __m128i_result[1]) = 0x0001918000017160; +- *((unsigned long*)& __m128i_result[0]) = 0x0001514000013120; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0020000f0000000f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010000f0000000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x01203f1e3d1c3b1a; +- *((unsigned long*)& __m128i_op1[0]) = 0x3918371635143312; +- *((unsigned long*)& __m128i_result[1]) = 0x21011f3f193d173b; +- *((unsigned long*)& __m128i_result[0]) = 0xff39ff37ff35ff33; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21201f1e19181716; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00005dcbe7e830c0; +- *((unsigned long*)& __m128i_op1[0]) = 0x03f21e0114bf19da; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x03f1e3d28b1a8a1a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x03f1e3d28b1a8a1a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x18e2184858682868; +- __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000022; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000022; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x21011f3f193d173b; +- *((unsigned long*)& __m128i_op1[0]) = 0xff39ff37ff35ff33; +- *((unsigned long*)& __m128i_result[1]) = 0x00fe008e009e0071; +- *((unsigned long*)& __m128i_result[0]) = 0x001c006f00c4008d; +- __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe; 
+- *((unsigned long*)& __m256i_result[3]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe; +- __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fffc0000fffc; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fffc0000fffc; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x21011f3f193d173b; +- *((unsigned long*)& __m128i_op0[0]) = 0xff39ff37ff35ff33; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000e41b; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000fffe; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; +- __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000022; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000022; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000003f200001e01; +- *((unsigned long*)& __m128i_op0[0]) = 0x000014bf000019da; +- *((unsigned long*)& __m128i_op1[1]) = 0x9c9c99aed5b88fcf; +- *((unsigned long*)& __m128i_op1[0]) = 0x7c3650c5f79a61a3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00005dcbe7e830c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000e41b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000005dcb; +- __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00005dcbe7e830c0; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000001fffff59; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x63); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00005dcbe7e830c0; +- *((unsigned long*)& __m128i_op0[0]) = 0x03f21e0114bf19da; +- *((unsigned long*)& __m128i_op1[1]) = 0x000003f200001e01; +- *((unsigned long*)& __m128i_op1[0]) = 0x000014bf000019da; +- *((unsigned long*)& __m128i_result[1]) = 0x0005fe0300010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100010001; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffffff1; +- *((int*)& __m256_op0[6]) = 0xfffffff1; +- *((int*)& __m256_op0[5]) = 0xfffffff1; +- *((int*)& __m256_op0[4]) = 0xfffffff1; +- *((int*)& __m256_op0[3]) = 0xfffffff1; 
+- *((int*)& __m256_op0[2]) = 0xfffffff1; +- *((int*)& __m256_op0[1]) = 0xfffffff1; +- *((int*)& __m256_op0[0]) = 0xfffffff1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000022; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000022; +- *((unsigned long*)& __m256i_result[3]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_result[2]) = 0x00000045ff740023; +- *((unsigned long*)& __m256i_result[1]) = 0x00000001fffc0001; +- *((unsigned long*)& __m256i_result[0]) = 0x00000045ff740023; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xacc8c794af2caf01; +- *((unsigned long*)& __m128i_op0[0]) = 0xa91e2048938c40f0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00fd0101; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00fd0101; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00fd0101; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00fd0101; +- __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256d_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256d_op1[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256d_op1[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) 
= 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000001fffff59; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000aaabffff; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000015d926c7; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000e41b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000abff0000abff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000abff0000abff; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_result[0]) = 0x1f5533a694f902c0; +- __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000023; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x15d926c7; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000e41b; +- *((int*)& __m128_op1[3]) = 0xfffffacd; +- *((int*)& __m128_op1[2]) = 0xb6dbecac; +- *((int*)& __m128_op1[1]) = 0x1f5533a6; +- *((int*)& __m128_op1[0]) = 0x94f902c0; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000015d926c7; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000e41b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xff56ff55ff01ff01; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xff56ff55ff01ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007f7f7f7f; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007f7f7f7f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007f7f7f7f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000001fffe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000001fffe; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x1223dabf; +- *((int*)& __m128_op0[2]) = 0x4c3b3549; +- *((int*)& __m128_op0[1]) = 0x8e8f8626; +- *((int*)& __m128_op0[0]) = 0xf15be124; +- *((int*)& __m128_op1[3]) = 0xfffffacd; +- *((int*)& __m128_op1[2]) = 0xb6dbecac; +- *((int*)& __m128_op1[1]) = 0x1f5533a6; +- *((int*)& __m128_op1[0]) = 0x94f902c0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffadffedbfefe; +- *((unsigned long*)& __m128i_result[0]) = 0x5f5f7bfedefb5ada; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x5a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000022ffdd; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000022ffdd; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000f4b6ff23; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000f4b6ff23; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0005fe0300010101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[0]) = 0xfe03000101010000; +- __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x007f807f007e8080; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f807f007e806f; +- *((unsigned long*)& __m256i_op0[1]) = 0x007f807f007e8080; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f807f007e806f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_result[3]) = 
0x00000000007e8080; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000007e8092; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000007e8080; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007e8092; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xfffffadf; +- *((int*)& __m128_op0[2]) = 0xfedbfefe; +- *((int*)& __m128_op0[1]) = 0x5f5f7bfe; +- *((int*)& __m128_op0[0]) = 0xdefb5ada; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000; +- __m128i_out = __lsx_vftintrp_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xff56ff55; +- *((int*)& __m256_op0[4]) = 0xff01ff01; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xff56ff55; +- *((int*)& __m256_op0[0]) = 0xff01ff01; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x0000abff; +- *((int*)& __m256_op1[4]) = 0x0000abff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x0000abff; +- *((int*)& __m256_op1[0]) = 0x0000abff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xff56ff55ff01ff01; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xff56ff55ff01ff01; +- *((unsigned long*)& __m256d_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000023; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000aaabffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00aa00ab00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00aa00ab00ff00ff; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00aa00ab00ff00ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00aa00ab00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000007e8080; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000007e8092; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000007e8080; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000007e8092; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffda6f; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffe3d7; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffda6e; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffe3d6; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffda6e; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffe3d6; +- *((unsigned long*)& __m128i_op1[1]) = 0xeeb1e4f4bc3763f3; +- *((unsigned long*)& __m128i_op1[0]) = 0x6f5edf5ada6fe3d7; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffe3d6; +- *((unsigned long*)& __m128i_result[0]) = 0xeeb1e4f4bc3763f3; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x23); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000043cf26c7; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000e31d4cae8636; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000021e79364; +- *((unsigned long*)& __m128i_result[0]) = 0x0000718ea657431b; +- __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x2b2a292827262524; +- *((unsigned long*)& __m256i_op1[2]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_op1[1]) = 0x2b2a292827262524; +- *((unsigned long*)& __m256i_op1[0]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xeeb1e4f43c3763f3; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff5a6fe3d7; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000021e79364; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000718ea657431b; +- *((unsigned long*)& __m128i_result[1]) = 0x000000006ca193ec; +- *((unsigned long*)& __m128i_result[0]) = 0x00008e72b5b94cad; +- __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffff60ca7104649; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff790a15db63d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0xfffff60ca710464a; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff790a15db63e; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_w(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- int_op0 = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2b2a292827262524; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_op0[1]) = 0x2b2a292827262524; +- *((unsigned long*)& __m256i_op0[0]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000023; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000027262524; +- *((unsigned long*)& __m256i_result[2]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000027262524; +- *((unsigned long*)& __m256i_result[0]) = 0x232221201f1e1d1c; +- __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0xbd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000007e8080; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fdda7dc4; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000007e8080; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001fdda7dc4; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ff827f80; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0226823c; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff827f80; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0226823c; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_w_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffff60ca7104649; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff790a15db63d; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffc00ffde4000; +- *((unsigned long*)& __m128i_result[0]) = 0xfe857400fed8f400; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x5a6f5c53ebed3faa; +- *((unsigned long*)& __m128i_op1[0]) = 0xa36aca4435b8b8e1; +- *((unsigned long*)& __m128i_result[1]) = 0x5a6f61865d36d3aa; +- *((unsigned long*)& __m128i_result[0]) = 0x7bea6962a0bfb621; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffda6f; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffe3d7; +- *((unsigned long*)& __m128i_result[1]) = 0xfefffffffeffda6f; +- *((unsigned long*)& __m128i_result[0]) = 0xfefffffffeffe3d7; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff827f80; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0226823c; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff827f80; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0226823c; +- *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000027262524; +- *((unsigned long*)& __m256i_op0[2]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000027262524; +- *((unsigned long*)& __m256i_op0[0]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000027262524; +- *((unsigned long*)& __m256i_result[2]) = 0x23222120171e151c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000027262524; +- *((unsigned long*)& __m256i_result[0]) = 0x23222120171e151c; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5a6f5c53ebed3faa; +- *((unsigned long*)& __m128i_op0[0]) = 0xa36aca4435b8b8e1; +- *((unsigned long*)& __m128i_op1[1]) = 0x5a6f5c53ebed3faa; +- *((unsigned long*)& __m128i_op1[0]) = 0xa36aca4435b8b8e1; +- *((unsigned long*)& __m128i_result[1]) = 0x5c535c533faa3faa; +- *((unsigned long*)& __m128i_result[0]) = 0xca44ca44b8e1b8e1; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x201fdfe0201fdfe0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x201fdfe0201fdfe0; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000021e79364; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000718ea657431b; +- *((unsigned long*)& __m128i_op1[1]) = 0xfefffffffeffda6f; +- *((unsigned long*)& __m128i_op1[0]) = 0xfefffffffeffe3d7; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff0000ff86; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x201fdfe0201fdfe0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x201fdfe0201fdfe0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_op1[0]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x1010101010101013; +- *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[1]) = 0x1010101010101013; +- *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000ff0000ff86; +- *((unsigned long*)& __m128i_op1[1]) = 0xffa6ff91fdd8ef77; +- *((unsigned long*)& __m128i_op1[0]) = 0x061202bffb141c38; +- *((unsigned long*)& __m128i_result[1]) = 0x0000005a00000228; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff9ee000004ec; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000027262524; +- *((unsigned long*)& __m256i_op0[2]) = 0x23222120171e151c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000027262524; +- *((unsigned long*)& __m256i_op0[0]) = 0x23222120171e151c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x201fdfe0201fdfe0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x201fdfe0201fdfe0; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010127272525; +- *((unsigned long*)& __m256i_result[2]) = 0x23a2a121179e951d; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010127272525; +- *((unsigned long*)& __m256i_result[0]) = 0x23a2a121179e951d; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x62cbf96e4acfaf40; +- *((unsigned long*)& __m128i_op0[0]) = 0xf0bc9a5278285a4a; +- *((int*)& __m128_result[3]) = 0xc6178000; +- *((int*)& __m128_result[2]) = 0xbb4a4000; +- *((int*)& __m128_result[1]) = 0x47050000; +- *((int*)& __m128_result[0]) = 0x43494000; +- __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101010127272525; +- *((unsigned long*)& __m256d_op1[2]) = 0x23a2a121179e951d; +- *((unsigned long*)& __m256d_op1[1]) = 0x0101010127272525; +- *((unsigned long*)& __m256d_op1[0]) = 0x23a2a121179e951d; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0101010127272525; +- *((unsigned long*)& __m256i_op2[2]) = 0x23a2a121179e951d; +- *((unsigned long*)& __m256i_op2[1]) = 0x0101010127272525; +- *((unsigned long*)& __m256i_op2[0]) = 0x23a2a121179e951d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffff0001; +- *((unsigned long*)& __m256i_op2[2]) = 0xfffffffffdd97dc4; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffff0001; +- *((unsigned long*)& __m256i_op2[0]) = 0xfffffffffdd97dc4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000001; +- *((unsigned long*)& __m256i_result[2]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000001; +- *((unsigned long*)& __m256i_result[0]) = 0x1010100f10100fd4; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffa6ff91fdd8ef77; +- *((unsigned long*)& __m128i_op0[0]) = 0x061202bffb141c38; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000ff0000ff86; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x010101fe0101fe87; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000005a00000228; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffff9ee000004ec; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1f54e0ab00000000; +- __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffa6ff91fdd8ef77; +- *((unsigned long*)& __m128d_op0[0]) = 0x061202bffb141c38; +- *((unsigned long*)& __m128d_op1[1]) = 0xfefffffffed08f77; +- *((unsigned long*)& __m128d_op1[0]) = 0x8160cdd2f365ed0d; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- 
__m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffa6ff91fdd8ef77; +- *((unsigned long*)& __m128i_op0[0]) = 0x061202bffb141c38; +- *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x010101fe0101fe87; +- *((unsigned long*)& __m128i_result[1]) = 0x0000004000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x3a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x01010101; +- *((int*)& __m128_op0[2]) = 0x01010101; +- *((int*)& __m128_op0[1]) = 0x010101fe; +- *((int*)& __m128_op0[0]) = 0x0101fe87; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000004000000002; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x5555410154551515; +- *((unsigned long*)& __m128d_op1[0]) = 0x0004455501500540; +- *((unsigned long*)& __m128d_result[1]) = 0xd555410154551515; +- *((unsigned long*)& __m128d_result[0]) = 0x8004455501500540; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x000023a20000a121; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000179e0000951d; +- *((unsigned long*)& __m256i_op1[1]) = 0x000023a20000a121; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000179e0000951d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x62cbf96e; +- *((int*)& __m128_op0[2]) = 0x4acfaf40; +- *((int*)& __m128_op0[1]) = 0xf0bc9a52; +- *((int*)& __m128_op0[0]) = 0x78285a4a; +- *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40; +- *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40; +- *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a; +- *((unsigned long*)& __m128i_op2[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op2[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_result[1]) = 0x62cbf84c02cbac00; +- *((unsigned long*)& __m128i_result[0]) = 0x1014120210280240; +- __m128i_out = 
__lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff0001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffdd97dc4; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff0001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffdd97dc4; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0001; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffdd97dc4; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0001; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffdd97dc4; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000023a20000a121; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000179e0000951d; +- *((unsigned long*)& __m256i_op0[1]) = 0x000023a20000a121; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000179e0000951d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000125100005111; +- *((unsigned long*)& __m256i_result[2]) = 0x00000c4f00004b0f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000125100005111; +- *((unsigned long*)& __m256i_result[0]) = 0x00000c4f00004b0f; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op2[0]) = 0x010101fe0101fe87; +- *((unsigned long*)& __m128i_result[1]) = 0x0101fe870101fe87; +- *((unsigned long*)& __m128i_result[0]) = 0x0101fe8700000000; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0101fe870101fe87; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101fe8700000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); 
+- +- *((unsigned long*)& __m128d_op0[1]) = 0x0101fe870101fe87; +- *((unsigned long*)& __m128d_op0[0]) = 0x0101fe8700000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x62cbf96e4acfaf40; +- *((unsigned long*)& __m128d_op1[0]) = 0xf0bc9a5278285a4a; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101fe870101fe87; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101fe8700000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffeeffaf; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_result[3]) = 0x0000226200005111; +- *((unsigned long*)& __m256i_result[2]) = 0x0000165e0000480d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000226200005111; +- *((unsigned long*)& __m256i_result[0]) = 0x0000165e0000480d; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x01010101; +- *((int*)& __m256_op0[6]) = 0x27272525; +- *((int*)& __m256_op0[5]) = 0x23a2a121; +- *((int*)& __m256_op0[4]) = 0x179e951d; +- *((int*)& __m256_op0[3]) = 0x01010101; +- *((int*)& __m256_op0[2]) = 0x27272525; +- *((int*)& __m256_op0[1]) = 0x23a2a121; +- *((int*)& __m256_op0[0]) = 0x179e951d; +- *((int*)& __m256_op1[7]) = 0x00001251; +- *((int*)& __m256_op1[6]) = 0x00005111; +- *((int*)& __m256_op1[5]) = 0x00000c4f; +- *((int*)& __m256_op1[4]) = 0x00004b0f; +- *((int*)& __m256_op1[3]) = 0x00001251; +- *((int*)& __m256_op1[2]) = 0x00005111; +- *((int*)& __m256_op1[1]) = 0x00000c4f; +- *((int*)& __m256_op1[0]) = 0x00004b0f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00002262; +- *((int*)& __m256_op0[6]) = 0x00005111; +- *((int*)& __m256_op0[5]) = 0x0000165e; +- *((int*)& __m256_op0[4]) = 0x0000480d; +- *((int*)& __m256_op0[3]) = 0x00002262; +- *((int*)& __m256_op0[2]) = 0x00005111; +- *((int*)& __m256_op0[1]) = 0x0000165e; +- *((int*)& __m256_op0[0]) = 0x0000480d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40; +- *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x62cbf96e4acfaf40; +- __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x40); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; +- *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffb6d01f5f94f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001f50000; +- __m128i_out = __lsx_vsrani_h_w(__m128i_op0,__m128i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffeeffaf; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000051; +- *((unsigned long*)& __m256i_result[2]) = 0x0000101000000fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000051; +- *((unsigned long*)& __m256i_result[0]) = 0x0000101000000fff; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000051; +- *((int*)& __m256_op1[5]) = 0x00001010; +- *((int*)& __m256_op1[4]) = 0x00000fff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000051; +- *((int*)& __m256_op1[1]) = 0x00001010; +- *((int*)& __m256_op1[0]) = 
0x00000fff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000051; +- *((int*)& __m256_op0[5]) = 0x00001010; +- *((int*)& __m256_op0[4]) = 0x00000fff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000051; +- *((int*)& __m256_op0[1]) = 0x00001010; +- *((int*)& __m256_op0[0]) = 0x00000fff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00220021004a007e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00220021004a007e; +- *((unsigned long*)& __m256i_result[3]) = 0xfffdfffffffdffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffddffdeffb5ff8d; +- *((unsigned long*)& __m256i_result[1]) = 0xfffdfffffffdffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffddffdeffb5ff8d; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00ff00ff; +- *((int*)& __m128_op0[2]) = 0x00ff00ff; +- *((int*)& __m128_op0[1]) = 0x62cbf96e; +- *((int*)& __m128_op0[0]) = 0x4acfaf40; +- *((unsigned long*)& __m128i_result[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x62cbf96e4acfaf40; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001f50000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffe0b0000; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000236200005111; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000175e0000490d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000236200005111; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000175e0000490d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000002362; +- *((unsigned long*)& __m256i_result[2]) = 0x000000010000175d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000002362; +- *((unsigned long*)& __m256i_result[0]) = 0x000000010000175d; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00ff00ff; +- *((int*)& __m128_op0[2]) = 0x00ff00ff; +- *((int*)& __m128_op0[1]) = 0x62cbf96e; +- *((int*)& __m128_op0[0]) = 0x4acfaf40; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffeeffaf; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffeeffaf; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffeeffaf; +- *((unsigned long*)& __m256i_result[2]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffeeffaf; +- *((unsigned long*)& __m256i_result[0]) = 0x1010100f10100fd4; +- __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40; +- *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a; +- *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40; +- *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffdfffffffdffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffddffdeffb5ff8d; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffdfffffffdffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffddffdeffb5ff8d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffeeffaf; +- *((unsigned long*)& __m256i_op1[2]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffeeffaf; +- *((unsigned long*)& __m256i_op1[0]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256i_op2[3]) = 0xfffdfffffffdffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffddffdeffb5ff8d; +- *((unsigned long*)& __m256i_op2[1]) = 0xfffdfffffffdffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffddffdeffb5ff8d; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffcffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0febedc9bb95dd8f; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffcffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0febedc9bb95dd8f; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x01f50000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000226200005111; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000165e0000480d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000226200005111; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000165e0000480d; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000226200005111; +- *((unsigned long*)& __m256i_result[2]) = 0x000016000000480d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000226200005111; +- *((unsigned long*)& __m256i_result[0]) = 0x000016000000480d; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00001f5400000000; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op0[2]) = 0x000b8f81b8c840e4; +- *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op0[0]) = 0x000b8f81b8c840e4; +- *((unsigned long*)& __m256i_result[3]) = 0x000007ff000007ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000007fffffff800; +- *((unsigned long*)& __m256i_result[1]) = 0x000007ff000007ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000007fffffff800; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001f5400000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff00000001; +- *((unsigned long*)& __m256d_op0[2]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000001; +- *((unsigned long*)& __m256d_op0[0]) = 0x1010100f10100fd4; +- *((unsigned long*)& __m256d_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op1[2]) = 0x000b8f81b8c840e4; +- *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op1[0]) = 0x000b8f81b8c840e4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000504f00002361; +- *((unsigned long*)& __m256i_result[2]) = 0xffff8f81000040e4; +- *((unsigned long*)& __m256i_result[1]) = 0x0000504f00002361; +- *((unsigned long*)& __m256i_result[0]) = 0xffff8f81000040e4; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f54e0ab00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x000007ff; +- *((int*)& __m256_op0[6]) = 0x000007ff; +- *((int*)& __m256_op0[5]) = 0x000007ff; +- *((int*)& __m256_op0[4]) = 0xfffff800; +- *((int*)& __m256_op0[3]) = 0x000007ff; +- *((int*)& __m256_op0[2]) = 0x000007ff; +- *((int*)& __m256_op0[1]) = 0x000007ff; +- *((int*)& __m256_op0[0]) = 0xfffff800; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x000007ff; +- *((int*)& __m256_result[6]) = 0x000007ff; +- *((int*)& __m256_result[5]) = 0x000007ff; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x000007ff; +- *((int*)& __m256_result[2]) = 0x000007ff; +- *((int*)& __m256_result[1]) = 0x000007ff; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000504f00002361; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff8f81000040e4; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000504f00002361; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff8f81000040e4; +- *((unsigned long*)& __m256i_op1[3]) = 0x000007ff000007ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000007ff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000007ff000007ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000007ff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000584e00002b60; +- *((unsigned long*)& __m256i_result[2]) = 0x0000787dffffbf1c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000584e00002b60; +- *((unsigned long*)& __m256i_result[0]) = 0x0000787dffffbf1c; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, 
__m128_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffeeffaf; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000011; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffeeffaf; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000011; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000695d00009b8f; +- *((unsigned long*)& __m128i_op0[0]) = 0x000074f20000d272; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00001f5400000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff000000ff0000; +- __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op1[2]) = 0x000b8f81b8c840e4; +- *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op1[0]) = 0x000b8f81b8c840e4; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffb3b4; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff5ffff4738; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffb3b4; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff5ffff4738; +- __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf800d0d8ffffeecf; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000383fffffdf0d; +- *((unsigned long*)& __m256i_op0[1]) = 0xf800d0d8ffffeecf; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000383fffffdf0d; +- *((unsigned long*)& __m256i_op1[3]) = 0xf000f000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xf000f000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xe800c0d8fffeeece; +- *((unsigned long*)& __m256i_result[2]) = 
0xffff383efffedf0c; +- *((unsigned long*)& __m256i_result[1]) = 0xe800c0d8fffeeece; +- *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; +- __m128i_out = __lsx_vaddi_du(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op1[3]) = 0xf800d0d8ffffeecf; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000383fffffdf0d; +- *((unsigned long*)& __m256d_op1[1]) = 0xf800d0d8ffffeecf; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000383fffffdf0d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0xf800d0d8ffffeecf; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000383fffffdf0d; +- *((unsigned long*)& __m256i_op1[1]) = 0xf800d0d8ffffeecf; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000383fffffdf0d; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0xd0d8eecf383fdf0d; +- __m256i_out = __lasx_xvpickev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000014; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op0[2]) = 0x000b8f81b8c850f4; +- *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op0[0]) = 0x000b8f81b8c850f4; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256i_op2[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op2[2]) = 0x000b8f81b8c850f4; +- *((unsigned long*)& __m256i_op2[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op2[0]) = 0x000b8f81b8c850f4; +- *((unsigned long*)& __m256i_result[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_result[2]) = 0x000b2673a90896a4; +- *((unsigned long*)& __m256i_result[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_result[0]) = 0x000b2673a90896a4; +- __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vflogb_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001f5400000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001f00000000; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffb3b4; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffff5ffff4738; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffb3b4; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffff5ffff4738; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0xee); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x3bcc5098; +- *((int*)& __m128_op1[2]) = 0x703fa5f0; +- *((int*)& __m128_op1[1]) = 0xab7b3134; +- *((int*)& __m128_op1[0]) = 0x9703f605; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- 
+- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffffb3b4; +- *((int*)& __m256_op0[5]) = 0xfffffff5; +- *((int*)& __m256_op0[4]) = 0xffff4738; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffffb3b4; +- *((int*)& __m256_op0[1]) = 0xfffffff5; +- *((int*)& __m256_op0[0]) = 0xffff4738; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0xffffb3b4; +- *((int*)& __m256_result[5]) = 0xfffffff5; +- *((int*)& __m256_result[4]) = 0xffff4738; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0xffffb3b4; +- *((int*)& __m256_result[1]) = 0xfffffff5; +- *((int*)& __m256_result[0]) = 0xffff4738; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op0[2]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256d_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256d_op0[0]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xaf0489001bd4c0c3; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xaf0489001bd4c0c3; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000d0d8ffffeecf; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000383fffffdf0d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000d0d8ffffeecf; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000383fffffdf0d; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffd8ffc7ffffdf0d; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffd8ffc7ffffdf0d; +- __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000226200005111; +- *((unsigned long*)& __m256i_op0[2]) = 0x000016000000480d; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000226200005111; +- *((unsigned long*)& __m256i_op0[0]) = 0x000016000000480d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1131288800000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1131288800000002; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000014; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffd8ffc7; +- *((int*)& __m256_op0[4]) = 0xffdaff8a; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffd8ffc7; +- *((int*)& __m256_op0[0]) = 0xffdaff8a; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0xffffb3b4; +- *((int*)& __m256_op1[5]) = 0xfffffff5; +- *((int*)& __m256_op1[4]) = 0xffff4738; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0xffffb3b4; +- *((int*)& __m256_op1[1]) = 0xfffffff5; +- *((int*)& __m256_op1[0]) = 0xffff4738; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe800c0d8fffeeece; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff383efffedf0c; +- *((unsigned long*)& __m256i_op0[1]) = 0xe800c0d8fffeeece; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff383efffedf0c; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xe800c000fffeeece; +- *((unsigned long*)& __m256i_result[2]) = 0xffff383efffedf0c; +- *((unsigned long*)& __m256i_result[1]) = 0xe800c000fffeeece; +- *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 
0xfff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vflogb_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe800c000fffeeece; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff383efffedf0c; +- *((unsigned long*)& __m256i_op0[1]) = 0xe800c000fffeeece; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff383efffedf0c; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0xe800c000fffeeece; +- *((unsigned long*)& __m256i_result[2]) = 0xffff383e000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0xe800c000fffeeece; +- *((unsigned long*)& __m256i_result[0]) = 0xffff383efffedf0c; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x26); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00220021004a007e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00220021004a007e; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xff00ff00ff00ff00; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x000fffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0010000000000001; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffb3b4; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffff5ffff4738; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffb3b4; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffff5ffff4738; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256d_op0[2]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256d_op0[0]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[3]) = 0x0001b0b1b4b5dd9f; +- *((unsigned long*)& __m256d_op2[2]) = 0x7f7f7f5c8f374980; +- *((unsigned long*)& __m256d_op2[1]) = 0x0001b0b1b4b5dd9f; +- *((unsigned long*)& __m256d_op2[0]) = 0x7f7f7f5c8f374980; +- *((unsigned long*)& __m256d_result[3]) = 0x8001b0b1b4b5dd9f; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0x8001b0b1b4b5dd9f; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000007f41; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001b0b1b4b5dd9f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f7f7f5c8f374980; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001b0b1b4b5dd9f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f7f7f5c8f374980; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100007f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100007f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x30); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000fffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007f41; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000fffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0010000000000001; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfff00000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xfff00000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000fffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0010000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007f41; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffc7f7f; +- *((unsigned long*)& 
__m256i_op0[2]) = 0xffffffffffffc000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffc7f7f; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffc000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8001b0b1b4b5dd9f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x8001b0b1b4b5dd9f; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000b0b100015d1e; +- *((unsigned long*)& __m256i_result[2]) = 0x0001fffe0001bfff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000b0b100015d1e; +- *((unsigned long*)& __m256i_result[0]) = 0x0001fffe0001bfff; +- __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x58); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op1[2]) = 0x000b2673a90896a4; 
+- *((unsigned long*)& __m256i_op1[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op1[0]) = 0x000b2673a90896a4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffafafb3b3dc9d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffafafb3b3dc9d; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7ff8000000000000; +- __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000fffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0010000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op0[2]) = 0x000b2673a90896a4; +- *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op0[0]) = 0x000b2673a90896a4; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0xd0d8eecf383fdf0d; +- *((unsigned long*)& __m256i_result[3]) = 0x0001504f4c4b2361; +- *((unsigned long*)& __m256i_result[2]) = 0x303338a48f374969; +- *((unsigned long*)& __m256i_result[1]) = 0x0001504f4c4b2361; +- *((unsigned long*)& __m256i_result[0]) = 0x303338a48f374969; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffafaf; +- *((int*)& __m256_op0[4]) = 0xb3b3dc9d; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffafaf; +- *((int*)& __m256_op0[0]) = 0xb3b3dc9d; +- *((int*)& __m256_op1[7]) = 0x00020000; +- *((int*)& __m256_op1[6]) = 0x00020000; +- *((int*)& __m256_op1[5]) = 0x00220021; +- *((int*)& __m256_op1[4]) = 0x004a007e; +- *((int*)& __m256_op1[3]) = 0x00020000; +- *((int*)& __m256_op1[2]) = 0x00020000; +- *((int*)& __m256_op1[1]) = 0x00220021; +- *((int*)& __m256_op1[0]) = 0x004a007e; +- *((int*)& __m256_op2[7]) = 0x00000001; +- *((int*)& 
__m256_op2[6]) = 0x00007f7f; +- *((int*)& __m256_op2[5]) = 0x00000001; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000001; +- *((int*)& __m256_op2[2]) = 0x00007f7f; +- *((int*)& __m256_op2[1]) = 0x00000001; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000001; +- *((int*)& __m256_result[6]) = 0x80007f7f; +- *((int*)& __m256_result[5]) = 0xffffafaf; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000001; +- *((int*)& __m256_result[2]) = 0x80007f7f; +- *((int*)& __m256_result[1]) = 0xffffafaf; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; +- __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0ef; +- *((unsigned long*)& __m256i_op0[2]) = 0xf0f0f0f0f0f0f0ef; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0ef; +- *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0ef; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000070f07170; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000070f0f0ef; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000070f07170; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000070f0f0ef; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000090909090; +- *((unsigned 
long*)& __m256i_result[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000090909090; +- *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x95); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001504f4c4b2361; +- *((unsigned long*)& __m256i_op0[2]) = 0x303338a48f374969; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001504f4c4b2361; +- *((unsigned long*)& __m256i_op0[0]) = 0x303338a48f374969; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_result[2]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_result[1]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_result[0]) = 0xffff47b4ffff5879; +- __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x81); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op0[2]) = 0x000b2673a90896a4; +- *((unsigned long*)& __m256i_op0[1]) = 0x000050504c4c2362; +- *((unsigned long*)& __m256i_op0[0]) = 0x000b2673a90896a4; +- *((unsigned long*)& __m256i_result[3]) = 0xa90896a400000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xa90896a400000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x22); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00020421d7d41124; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00020421d7d41124; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x55550000; +- *((int*)& __m256_op0[6]) = 0x55550000; +- *((int*)& __m256_op0[5]) = 0x55550000; +- *((int*)& __m256_op0[4]) = 0x55550000; +- *((int*)& __m256_op0[3]) = 0x55550000; +- *((int*)& __m256_op0[2]) = 0x55550000; +- *((int*)& __m256_op0[1]) = 0x55550000; +- *((int*)& __m256_op0[0]) = 0x55550000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000d5000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000d5000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000d5000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000d5000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00020421d7d41124; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00020421d7d41124; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00220021004a007e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00220021004a007e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00220021004a007e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00220021004a007e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op0[2]) = 
0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000f0f0f0f0; +- *((unsigned long*)& __m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ff0fff0fff0f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ff0fff0fff0f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffb10001ff8f; +- *((unsigned long*)& __m256i_result[2]) = 0x0001004c0001ff87; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffb10001ff8f; +- *((unsigned long*)& __m256i_result[0]) = 0x0001004c0001ff87; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000; +- __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000180007f7f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffafaf80000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000180007f7f; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffafaf80000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff010000ff017e; +- *((unsigned long*)& 
__m256i_result[2]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_result[0]) = 0x01fe01ae00ff00ff; +- __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_result[3]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_result[2]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_result[1]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_result[0]) = 0xffff47b4ffff5879; +- __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000504f; +- *((int*)& __m256_op0[6]) = 0xffff3271; +- *((int*)& __m256_op0[5]) = 0xffff47b4; +- *((int*)& __m256_op0[4]) = 0xffff5879; +- *((int*)& __m256_op0[3]) = 0x0000504f; +- *((int*)& __m256_op0[2]) = 0xffff3271; +- *((int*)& __m256_op0[1]) = 0xffff47b4; +- *((int*)& __m256_op0[0]) = 0xffff5879; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xa90896a400000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xa90896a400000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_result[3]) = 0x7f7f000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x7f7f000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f7f7f7f7f7f7f; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256d_op0[2]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256d_op0[0]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256d_op1[3]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256d_op1[2]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256d_op1[0]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f7f000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f7f000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff3225; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff3225; +- *((unsigned long*)& __m256i_op1[3]) = 0x2221201f1e1d1c1b; +- *((unsigned long*)& __m256i_op1[2]) = 0x1a19181716151413; +- *((unsigned long*)& __m256i_op1[1]) = 0x2221201f1e1d1c1b; +- *((unsigned long*)& __m256i_op1[0]) = 0x1a19181716151413; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000004442403; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000004442403; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x63); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f7f000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f7f000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100010001; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff47b4ffff5878; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000b84b0000a787; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff47b4ffff5878; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000b84b0000a787; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff47b4ffff5878; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000b84b0000a787; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff47b4ffff5878; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000b84b0000a787; +- *((unsigned long*)& __m256i_result[3]) = 0xffff07b4ffff0707; +- *((unsigned long*)& __m256i_result[2]) = 0x0000b8070000a787; +- *((unsigned long*)& __m256i_result[1]) = 0xffff07b4ffff0707; +- *((unsigned long*)& __m256i_result[0]) = 0x0000b8070000a787; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x0000ffb1; +- *((int*)& __m256_op1[6]) = 0x0001ff8f; +- *((int*)& __m256_op1[5]) = 0x0001004c; +- *((int*)& __m256_op1[4]) = 0x0001ff87; +- *((int*)& __m256_op1[3]) = 0x0000ffb1; +- *((int*)& __m256_op1[2]) = 0x0001ff8f; +- *((int*)& __m256_op1[1]) = 0x0001004c; +- *((int*)& __m256_op1[0]) = 0x0001ff87; +- *((unsigned long*)& __m256i_result[3]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_op0[2]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_op0[0]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_result[2]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_result[0]) = 0x01fe01ae00ff00ff; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0x7f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffe1ffffffe1; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffe1ffffffe1; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000100010001; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x1313131313131313; +- *((unsigned long*)& __m128i_result[0]) = 0x1313131313131313; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0xec); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffe1ffffffe1; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffe1ffffffe1; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffafffffffa; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffafffffffa; +- __m128i_out = __lsx_vmini_w(__m128i_op0,-6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfffffffafffffffa; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffffffafffffffa; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0xfffffffa; +- *((int*)& __m128_op0[2]) = 0xfffffffa; +- *((int*)& __m128_op0[1]) = 0xfffffffa; +- *((int*)& __m128_op0[0]) = 0xfffffffa; +- *((int*)& __m128_result[3]) = 0xfffffffa; +- *((int*)& __m128_result[2]) = 0xfffffffa; +- *((int*)& __m128_result[1]) = 0xfffffffa; +- *((int*)& __m128_result[0]) = 0xfffffffa; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0aa077b7054c9554; +- *((unsigned long*)& __m128i_op0[0]) = 0x40c7ee1f38e4c4e8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vslti_du(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff07b4ffff0707; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000b8070000a787; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff07b4ffff0707; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000b8070000a787; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000504fffff3271; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff47b4ffff5879; +- *((unsigned long*)& __m256i_result[3]) = 0xffffb7650000d496; +- *((unsigned long*)& __m256i_result[2]) = 0x0001800000018000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffb7650000d496; +- *((unsigned long*)& __m256i_result[0]) = 0x0001800000018000; +- __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000010000000a; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256d_op1[2]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256d_op1[0]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256d_op2[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256d_op2[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000100010001; +- *((unsigned long*)& __m256d_result[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256d_result[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000100010001; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000010000000a; +- __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000080008001; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_op2[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000001000b000b; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000001000b000b; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) 
= 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_op1[2]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_op1[0]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xff00feffff00fe81; +- *((unsigned long*)& __m256i_result[2]) = 0xfe01fe51ff00ff40; +- *((unsigned long*)& __m256i_result[1]) = 0xff00feffff00fe81; +- *((unsigned long*)& __m256i_result[0]) = 0xfe01fe51ff00ff40; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000001; +- *((int*)& __m256_op0[4]) = 0x0000000a; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000001; +- *((int*)& __m256_op0[0]) = 0x0000000a; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000040; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff7f80ffff7f80; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff7f80ffff7f80; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff7f80ffff7f80; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff7f80ffff7f80; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fffeff00; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffeff00; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_op0[2]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff010000ff017e; +- *((unsigned long*)& __m256i_op0[0]) = 0x01fe01ae00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff017e6b803fc0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff017e6b803fc0; +- __m256i_out = __lasx_xvsrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0aa077b7054c9554; +- *((unsigned long*)& __m128i_op0[0]) = 0x40c7ee1f38e4c4e8; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vsrai_h(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_b(__m128i_op0,8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000a00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000010000000a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000fffffe01fe52; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff01ff02; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffffe01fe52; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff01ff02; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000080008001; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_b(__m128i_op0,-5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,-15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007f7f7f7f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000080000000; +- __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x33); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000080008001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000080008001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000a00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000fffff614; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000a00000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000fffff614; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000020202020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x7ef8000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7ef8000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ef8000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ef8000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7ef8000000000000; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,-9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffff600000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff000009ec; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffff600000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff000009ec; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000180000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000180000001; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f6f7f7f7f6; +- *((unsigned long*)& __m256i_result[2]) = 0xf7f7f7f6f7f7f7f6; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f6f7f7f7f6; +- *((unsigned long*)& __m256i_result[0]) = 0xf7f7f7f6f7f7f7f6; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff80017fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff80017fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000280000; +- __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x30); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 
0x7ef8000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000f; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- int_result = 0x000000007ff00000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0x92); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,-12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x000000000000000f; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000280000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000140001; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000000; +- __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 
0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100008000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100007fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100008000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100007fff; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ef8000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8108000000000000; +- __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_result[1]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a7f0a0a0a; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000001fffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000001fffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000001fffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000001fffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000000000001e; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100007fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100007fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100008000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100007fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100008000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100007fff; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100008000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100007fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100007fff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000140001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000140001; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010200000000; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800; +- __m256i_out = 
__lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x35); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000080000000800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000080000000800; +- __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffe5ffffffe5; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010200000000; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- unsigned_long_int_result = 0x00000000ffffffff; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x0); +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0xffffffe5; +- *((int*)& __m256_op2[6]) = 0xffffffe5; +- *((int*)& __m256_op2[5]) = 0xffffffe5; +- *((int*)& __m256_op2[4]) = 0xffffffe5; +- *((int*)& __m256_op2[3]) = 0xffffffe5; +- *((int*)& __m256_op2[2]) = 0xffffffe5; +- *((int*)& __m256_op2[1]) = 0xffffffe5; +- *((int*)& __m256_op2[0]) = 0xffffffe5; +- *((int*)& __m256_result[7]) = 0xffffffe5; +- *((int*)& __m256_result[6]) = 0xffffffe5; +- *((int*)& __m256_result[5]) = 0xffffffe5; +- *((int*)& __m256_result[4]) = 0xffffffe5; +- *((int*)& __m256_result[3]) = 0xffffffe5; +- *((int*)& __m256_result[2]) = 0xffffffe5; +- *((int*)& __m256_result[1]) = 0xffffffe5; +- *((int*)& __m256_result[0]) = 0xffffffe5; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x61608654a2d4f6da; +- __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000001e; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffe5ffffffe5; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010200000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0c0c0c0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0014000100000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x35); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x61608654a2d4f6da; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x61608654a2d4f6da; +- *((unsigned long*)& __m128i_result[1]) = 0xfee0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xc2c00ca844a8ecb4; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_w(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[2]) 
= 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x5d20a0a1; +- *((int*)& __m256_result[6]) = 0x5d20a0a1; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x5d20a0a1; +- *((int*)& __m256_result[2]) = 0x5d20a0a1; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_l(__m256i_op0,__m256i_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x5d20a0a15d20a0a1; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x5d20a0a15d20a0a1; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0014000100000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f807f807f807f80; +- __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000007f7f7f7f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_result[1]) = 0x000000003fbf3fbf; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7ff8; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000007f7f7f7f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007f7f7f7f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000010; +- __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x5d20a0a1; +- *((int*)& __m256_op1[6]) = 0x5d20a0a1; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x5d20a0a1; +- *((int*)& __m256_op1[2]) = 0x5d20a0a1; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x7f7f7f7f; +- *((int*)& __m128_op0[1]) = 0x00000001; +- *((int*)& __m128_op0[0]) = 0x00000010; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x61608654a2d4f6da; +- *((unsigned long*)& __m128i_result[1]) = 0xfff0800080008000; +- *((unsigned long*)& __m128i_result[0]) = 0xe160065422d476da; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff0800080008000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe160065422d476da; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000d00000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000b00000010; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x61608654a2d4f6da; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ff08ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x5d20a0a1; +- *((int*)& __m256_op0[6]) = 0x5d20a0a1; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x5d20a0a1; +- *((int*)& __m256_op0[2]) = 0x5d20a0a1; +- *((int*)& __m256_op0[1]) = 
0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x5d20a0a15d20a0a1; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x5d20a0a15d20a0a1; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffeaffffffea; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffeaffffffea; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffeaffffffea; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffeaffffffea; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ff08ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ff08ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff0; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff08ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_w(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000002c; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000002c; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000002c; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000002c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000002c0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000002c0000; +- __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x5d20a0a15d20a0a1; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x5d20a0a15d20a0a1; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x5d20a0895d20a089; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffe8ffffffe8; +- *((unsigned long*)& __m256i_result[1]) = 0x5d20a0895d20a089; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffe8ffffffe8; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3fbf3fbf00007fff; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff8007; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffint_d_lu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000003fbf3fbf; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7ff8; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000100; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff8fffffff8; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff8fffffff8; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff8fffffff8; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff8fffffff8; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000000f; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 
0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00077f88; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00077f97; +- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000077f97; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffeff7f0000; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3fbf3fbf00007fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x3fbf3fbf00007fff; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000003fbf3fbf; +- *((unsigned long*)& __m128d_op1[0]) = 0x7fff7fff7fff7ff8; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000003fbf3fbf; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7ff8; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff3fbfffff; +- 
*((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff3fbfffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fbf3fbf00007fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007f7f7f01027f02; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff3fbfffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000100fe000100fe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vclz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x39); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x000100fe000100fe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op2[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000; +- __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; +- __m256d_out = __lasx_xvflogb_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100fe000100fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x31); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fbf3fbf00007fff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00100010; +- *((int*)& __m128_op0[2]) = 0x00100010; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000039; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000039; +- __m128i_out = __lsx_vclz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00002000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x1fe02000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000003f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000003f800000; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x4050000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& 
__m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00003f80000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4050000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x2028000000000000; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000001fe02000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000001fe02000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4050000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_w(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00000000; +- __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4050000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x2028000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff000000ff; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,-10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x000000ff; +- *((int*)& __m128_op0[2]) = 0x000000ff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128d_result[1]) = 0x371fe00000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x371fe00000000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 
0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x371fe00000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x371fe00000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_result[0]) = 0x370bdfecffecffec; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x000000ff; +- *((int*)& __m128_op0[0]) = 0x000000ff; +- *((int*)& __m128_op1[3]) = 0x370bdfec; +- *((int*)& __m128_op1[2]) = 0xffecffec; +- *((int*)& __m128_op1[1]) = 0x370bdfec; +- *((int*)& __m128_op1[0]) = 0xffecffec; +- *((int*)& __m128_result[3]) = 0x370bdfec; +- *((int*)& __m128_result[2]) = 0xffecffec; +- *((int*)& __m128_result[1]) = 0x370bdfec; +- *((int*)& __m128_result[0]) = 0xffecffec; +- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80; +- *((unsigned long*)& __m128i_result[1]) = 0x0037ffdfffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0037ffdfffeb007f; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x371fe00000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x371fe00000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffffff; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000003f3f; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_h(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00010001; +- *((int*)& __m256_op1[6]) = 0x00010001; +- *((int*)& __m256_op1[5]) = 0x00010001; +- *((int*)& __m256_op1[4]) = 0x00010001; +- *((int*)& __m256_op1[3]) = 0x00010001; +- *((int*)& __m256_op1[2]) = 0x00010001; +- *((int*)& __m256_op1[1]) = 0x00010001; +- *((int*)& __m256_op1[0]) = 0x00010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_result[1]) = 0x000000006e17bfd8; +- *((unsigned long*)& __m128i_result[0]) = 0x000000006e17bfd8; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000ffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff0100000001; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff0100000001; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vclo_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x7f800000; +- *((int*)& __m256_op0[6]) = 0x7f800000; +- *((int*)& __m256_op0[5]) = 0x7f800000; +- *((int*)& __m256_op0[4]) = 0x7f800000; +- *((int*)& __m256_op0[3]) = 0x7f800000; +- *((int*)& __m256_op0[2]) = 0x7f800000; +- *((int*)& __m256_op0[1]) = 0x7f800000; +- *((int*)& __m256_op0[0]) = 0x7f800000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000006e17bfd8; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x000000006e17bfd8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffff0100000001; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffff0100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x000000006e17bfd8; +- *((unsigned long*)& __m128i_result[0]) = 0x000000006e17bfd8; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80; +- *((unsigned long*)& __m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; +- *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f27332b9f; +- __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x36fbdfdcffdcffdc; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7ff0000000000000; +- __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,-2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op1[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_result[1]) = 0x370bdfec00130014; +- *((unsigned long*)& __m128i_result[0]) = 0x370bdfec00130014; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x38); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op2[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fefffffffffffff; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,-5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000008140c80; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x36fbdfdcffdcffdc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80; +- *((unsigned long*)& __m128i_op2[1]) = 0x1f1f1f1f1f1f1f00; +- *((unsigned long*)& __m128i_op2[0]) = 0x1f1f1f27332b9f00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x36fbdfdcffdc0008; +- __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x3ff0010000000000; +- *((unsigned long*)& 
__m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x3ff0010000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000008140c80; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000008140c80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000002050320; +- __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000008130c7f; +- *((unsigned long*)& __m128i_op1[1]) = 0x1f1f1f1f1f1f1f00; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f1f1f27332b9f00; +- *((unsigned long*)& __m128i_op2[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op2[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_result[1]) = 0x06b1213ef1efa299; +- *((unsigned long*)& __m128i_result[0]) = 0x8312f5424ca4a07f; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fef; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fef; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000007fef; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fef; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x06b1213ef1efa299; +- *((unsigned long*)& __m128i_op0[0]) = 0x8312f5424ca4a07f; +- *((unsigned long*)& __m128i_op1[1]) = 0x1f1f1f1f1f1f1f00; +- *((unsigned long*)& __m128i_op1[0]) = 0x1f1f1f27332b9f00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xa23214697fd03f7f; +- __m128i_out = 
__lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f70000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x7f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fef; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fef; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fef; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fef; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000007fee; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000fedd; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000fedd; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000fedd; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000fedd; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f70000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f70000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfec00130014; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfec00130014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000370bffffdfec; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001300000014; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x800080008000ffee; +- *((unsigned long*)& __m256i_result[2]) = 0x800080008000ffee; +- *((unsigned long*)& __m256i_result[1]) = 0x800080008000ffee; +- *((unsigned long*)& __m256i_result[0]) = 0x800080008000ffee; +- __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001c88bf0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001c88bf0; +- __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000001c88bf0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000320; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000007730; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001c88bf0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x7f70000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x7f70000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x7f70000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x7f70000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_result[1]) = 0x00000dc300003ffb; +- *((unsigned long*)& __m128i_result[0]) = 0x00000dc300003ffb; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000dc300003ffb; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000dc300003ffb; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000ffff3fbfffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x7fffffff7fffffff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x7ffffffb; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001c88bf0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000320; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000007730; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000007fee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007fee; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xa23214697fd03f7f; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007ffffffb; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x010101017f010101; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff810011; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfeca2eb9931; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00d3007c014e00bd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200020002; +- *((unsigned long*)& __m128i_result[0]) = 0x06e1000e00030005; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000002050320; +- *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x010101017f010101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000040600000406; +- *((unsigned long*)& __m128i_result[0]) = 0x020202020202fe02; +- __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xfff70156; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xfff70156; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xfff70156; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xfff70156; +- *((int*)& __m256_op1[7]) = 0x7fefffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0x7fefffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x7fefffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0x7fefffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffff70156; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffff70156; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffff70156; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffff70156; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xa2321469; +- *((int*)& __m128_op0[0]) = 0x7fd03f7f; +- *((int*)& __m128_op1[3]) = 0x00000406; +- *((int*)& __m128_op1[2]) = 0x00000406; +- *((int*)& __m128_op1[1]) = 0x02020202; +- *((int*)& __m128_op1[0]) = 0x0202fe02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000040600000406; +- *((unsigned long*)& __m128i_op0[0]) = 0x020202020202fe02; +- *((unsigned long*)& __m128i_result[1]) = 0xfff503fbfff503fb; +- *((unsigned long*)& __m128i_result[0]) = 0x01f701f701f7fdf7; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x3fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[2]) = 0x3fff7fffffc08008; +- *((unsigned long*)& __m256i_result[1]) = 0x3fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[0]) = 0x3fff7fffffc08008; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000040600000406; +- *((unsigned long*)& __m128i_op0[0]) = 0x020202020202fe02; +- *((unsigned long*)& __m128i_result[1]) = 0x0020200000202000; +- *((unsigned long*)& __m128i_result[0]) = 0x002020000fe02000; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x7fefffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0x7fefffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x7fefffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0x7fefffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7fefffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0x7fefffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0x7fefffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0x7fefffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_op1[3]) = 0x3fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x3fff8000ffa08004; +- *((unsigned long*)& __m256i_op1[1]) = 0x3fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x3fff8000ffa08004; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; +- __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x13f9c5b60028a415; +- *((unsigned long*)& __m128i_op0[0]) = 0x545cab1d7e57c415; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& 
__m128i_result[1]) = 0x13f9c5b60028a415; +- *((unsigned long*)& __m128i_result[0]) = 0x545cab1d81a83bea; +- __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op0[0]) = 0x370bdfeca2eb9931; +- *((unsigned long*)& __m128i_op1[1]) = 0x370bdfecffecffec; +- *((unsigned long*)& __m128i_op1[0]) = 0x370bdfeca2eb9931; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x370bdfec; +- *((int*)& __m128_op0[2]) = 0xffecffec; +- *((int*)& __m128_op0[1]) = 0x370bdfec; +- *((int*)& __m128_op0[0]) = 0xa2eb9931; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff810011; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[2]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[0]) = 0x817f11ed81800ff0; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000aaaa; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000545cab1d; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000081a83bea; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x00d3007c014e00bd; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000aaaa; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fef010000010100; +- *((unsigned long*)& __m256i_result[2]) = 0x7fef010000010100; +- *((unsigned long*)& __m256i_result[1]) = 0x7fef010000010100; +- *((unsigned long*)& __m256i_result[0]) = 0x7fef010000010100; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000aaaa; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffff70156; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffff70156; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffff70156; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffff70156; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x74); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000545cab1d; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000081a83bea; +- *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415; +- *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea; +- *((unsigned long*)& __m128i_result[1]) = 0x00400000547cab1d; +- *((unsigned long*)& __m128i_result[0]) = 0x2000000081a83fea; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2; +- *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000080; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x13f9c5b60028a415; +- *((unsigned long*)& __m128d_op1[0]) = 0x545cab1d81a83bea; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2; +- *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x14ccc6320176a4d2; +- *((unsigned long*)& __m128d_op0[0]) = 0x685670d37e80682a; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320176a4d2; +- *((unsigned long*)& __m128i_op0[0]) = 0x685670d37e80682a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320176a4d2; +- *((unsigned long*)& __m128i_result[0]) = 0x685670d37e80682a; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0xffff8180ffff8181; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0xffff8180ffff8181; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320176a4d2; +- *((unsigned long*)& __m128i_op0[0]) = 0x685670d37e80682a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320076a4d2; +- *((unsigned long*)& __m128i_result[0]) = 0x685670d27e00682a; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffe05fc47b400; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffe06003fc000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffe05fc47b400; +- 
*((unsigned long*)& __m256i_result[0]) = 0xfffffe06003fc000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x14ccc6320076a4d2; +- *((unsigned long*)& __m128i_op1[0]) = 0x685670d27e00682a; +- *((unsigned long*)& __m128i_result[1]) = 0x14ccc6320076a4d2; +- *((unsigned long*)& __m128i_result[0]) = 0x685670d27e00682a; +- __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x9d9d9d9d9d9d9d9d; +- *((unsigned long*)& __m128i_result[0]) = 0x9d9d9d9d9d9d9d9d; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0x62); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff810011; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff810011; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff8180ffff8181; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff8180ffff8181; +- *((unsigned long*)& __m256i_result[3]) = 0x000000008000ff00; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff81ff81; +- *((unsigned long*)& __m256i_result[1]) = 0x000000008000ff00; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff81ff81; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0fff01800fff0181; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0fff01800fff0181; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0007ff800007ff80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0007ff800007ff80; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x14ccc632; +- *((int*)& __m128_op0[2]) = 0x0076a4d2; +- *((int*)& __m128_op0[1]) = 0x685670d2; +- *((int*)& __m128_op0[0]) = 
0x7e00682a; +- *((int*)& __m128_op1[3]) = 0x14ccc632; +- *((int*)& __m128_op1[2]) = 0x0076a4d2; +- *((int*)& __m128_op1[1]) = 0x685670d2; +- *((int*)& __m128_op1[0]) = 0x7e00682a; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000001; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x0fff0180; +- *((int*)& __m256_op0[4]) = 0x0fff0181; +- *((int*)& __m256_op0[3]) = 0x00000001; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x0fff0180; +- *((int*)& __m256_op0[0]) = 0x0fff0181; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000545cffffab1d; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff81a800003bea; +- *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415; +- *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea; +- *((unsigned long*)& __m128i_result[1]) = 0x0000545cffff0001; +- *((unsigned long*)& __m128i_result[0]) = 0xffff81a800003bea; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff800000003; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x00197d3200197d56; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x00197d3200197d56; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_h(__m256i_op0,-10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m128i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320076a4d2; +- *((unsigned long*)& __m128i_op0[0]) = 0x685670d27e00682a; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x14ccc631eb3339ce; +- *((unsigned long*)& __m128i_result[0]) = 0x685670d197a98f2e; +- __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[2]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[0]) = 0x817f11ed81800ff0; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x817f11ed81800ff0; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x817f11ed81800ff0; +- *((unsigned long*)& __m256i_result[3]) = 0x00000004fc480040; +- *((unsigned long*)& __m256i_result[2]) = 0x00000004fc480040; +- *((unsigned long*)& __m256i_result[1]) = 0x00000004fc480040; +- *((unsigned long*)& __m256i_result[0]) = 0x00000004fc480040; +- __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x13f9c5b60028a415; +- *((unsigned long*)& __m128i_op0[0]) = 0x545cab1d81a83bea; +- *((unsigned long*)& __m128i_op1[1]) = 0x13f9c5b60028a415; +- *((unsigned long*)& __m128i_op1[0]) = 0x545cab1d81a83bea; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff0015172b; +- __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x14ccc6320076a4d2; +- *((unsigned long*)& __m128i_op0[0]) = 0x685670d27e00682a; +- *((unsigned long*)& __m128i_op1[1]) = 0x14ccc6320076a4d2; +- *((unsigned long*)& __m128i_op1[0]) = 0x685670d27e00682a; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; +- __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x00197d3200197d56; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x00197d3200197d56; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffff800000003; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,7); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001900000019; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001900000019; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000300000003; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000300000003; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffffd; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffd; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffffd; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffd; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- long_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0x0015172b; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xfffffffe; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xfffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffff0015172b; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff0015172b; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff0015172b; +- __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; +- __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffb00151727; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op2[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op2[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x00010000fffffffc; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x20fc000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x20fc000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffb00151727; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0015172b; +- *((unsigned long*)& __m128i_op2[1]) = 0x00010000fffffffc; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffb00151727; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00010000fffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x00010000fffffffc; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; +- __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffebeeaaefafb; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffebeeaaeeeeb; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffebeeaaefafb; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffebeeaaeeeeb; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 
0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x7fefffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x7fefffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x7fffffff; +- *((int*)& __m256_op0[6]) = 0x7fffffff; +- *((int*)& __m256_op0[5]) = 0x7fffffff; +- *((int*)& __m256_op0[4]) = 0x7fffffff; +- *((int*)& __m256_op0[3]) = 0x7fffffff; +- *((int*)& __m256_op0[2]) = 0x7fffffff; +- *((int*)& __m256_op0[1]) = 0x7fffffff; +- *((int*)& __m256_op0[0]) = 0x7fffffff; +- *((int*)& __m256_op1[7]) = 0x20fc0000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x20fc0000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffebeeaaefafb; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffebeeaaeeeeb; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffebeeaaefafb; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffebeeaaeeeeb; +- *((unsigned long*)& __m256i_result[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x7fefffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x7fefffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 
0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x01ffbfff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x03ffffff03ffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x01ffbfff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x03ffffff03ffffff; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x26); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xfbffffff; +- *((int*)& __m128_op0[0]) = 0x27001517; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x0000ffff; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 
0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x14ccc631eb3339ce; +- *((unsigned long*)& __m128i_op0[0]) = 0x685670d197a98f2e; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe0045; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe0045; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; +- *((unsigned long*)& __m128i_op1[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0x43f0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x43f0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x43f0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x43f0000000000000; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; +- *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe00fe0045; +- *((unsigned long*)& __m128i_result[1]) = 0x007f007f007f007e; +- *((unsigned long*)& __m128i_result[0]) = 0x007f007f007effc6; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x007f007f007f007e; +- *((unsigned long*)& __m128d_op1[0]) = 0x007f007f007effc6; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff1fffffff1; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff46; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffe00000002; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff46000000ba; +- __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffe00000002; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff46000000ba; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffa30000005c; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x43f0000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x43f0000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x43f0000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x43f0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 
0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff46; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x4c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x001f001f001f001f; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0xa3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x001f001f001f001f; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000001001f001e; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000001001f001e; +- __m256i_out = 
__lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000fffe0000ff45; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff000000b9; +- *((unsigned long*)& __m128i_op1[1]) = 0xffd5002affffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x343d8dc6b0ed5a08; +- *((unsigned long*)& __m128i_result[1]) = 0x012b012c01010246; +- *((unsigned long*)& __m128i_result[0]) = 0x353e743b50135a4f; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffd5002affffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x343d8dc6b0ed5a08; +- *((unsigned long*)& __m128i_result[1]) = 0x002affd600000001; +- *((unsigned long*)& __m128i_result[0]) = 0xcbc2723a4f12a5f8; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffe05fc47b400; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffe06003fc000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffe05fc47b400; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffe06003fc000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x01ff020000ff03ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x01346b8d00b04c5a; +- *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x01ff020000ff03ff; +- *((unsigned long*)& __m128i_result[0]) = 0x01346b8d00b04c5a; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7eeefefefefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x7eeefefefefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x002affd600000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xcbc2723a4f12a5f8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffd60001723aa5f8; +- __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x343d8dc5b0ed5a08; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_d(__m128i_op0,12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x343d8dc5b0ed5a08; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x353c8cc4b1ec5b09; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000010101010; +- *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000010101010; +- *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffe05fc47b400; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffe06003fc000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffe05fc47b400; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffe06003fc000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op0[0]) = 0x353c8cc4b1ec5b09; +- *((unsigned long*)& __m128i_op1[1]) = 0x002affd600000001; +- *((unsigned long*)& __m128i_op1[0]) = 0xcbc2723a4f12a5f8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808000000035; +- __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7eeefefefefefefe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x7eeefefefefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7e00ee00fe00fe00; +- *((unsigned long*)& __m256i_result[2]) = 0xfe00fe00fe00fe00; +- *((unsigned long*)& __m256i_result[1]) = 0x7e00ee00fe00fe00; +- *((unsigned long*)& __m256i_result[0]) = 0xfe00fe00fe00fe00; +- __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00f525682ffd27f2; +- *((unsigned long*)& __m128i_op0[0]) = 0x00365c60317ff930; +- *((unsigned long*)& __m128i_result[1]) = 0xe500c085c000c005; +- *((unsigned long*)& __m128i_result[0]) = 0xe5c1a185c48004c5; +- __m128i_out = __lsx_vnori_b(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x61); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffd60001723aa5f8; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007f007f7f; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff00ff; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe500c085c000c005; +- *((unsigned long*)& __m128i_op0[0]) = 0xe5c1a185c48004c5; +- *((unsigned long*)& __m128i_result[1]) = 0xffffe500ffffc085; +- *((unsigned long*)& __m128i_result[0]) = 0xffffc000ffffc005; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fefe0000fefe; +- *((unsigned long*)& __m256i_result[2]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fefe0000fefe; +- *((unsigned long*)& __m256i_result[0]) = 0x00fe00fe00fe00fe; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_result[2]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256d_result[1]) 
= 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_result[0]) = 0x00ff00fe00ff00fe; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808000000035; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000200000000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00fe00fe; +- __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0d1202e19235e2bc; +- *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1; +- *((unsigned long*)& __m128i_result[1]) = 0x2f3626e7b637e6be; +- *((unsigned long*)& __m128i_result[0]) = 0xee3ee6f77f6e76f7; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x26); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x7fef0000ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fef0000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fefffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fef7fef7fef7fef; +- *((unsigned long*)& __m256i_result[2]) = 0x7fef7fef7fef7fef; +- *((unsigned long*)& __m256i_result[1]) = 0x7fef7fef7fef7fef; +- *((unsigned long*)& __m256i_result[0]) = 0x7fef7fef7fef7fef; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe500ffffc085; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffc000ffffc005; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0d1202e19235e2bc; +- *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffe500ffffc085; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffc000ffffc005; +- *((unsigned long*)& __m128i_result[1]) = 0xffff00000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xe500c085; +- *((int*)& __m128_op0[2]) = 0xc000c005; +- *((int*)& __m128_op0[1]) = 0xe5c1a185; +- *((int*)& __m128_op0[0]) = 0xc48004c5; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffc000; +- *((int*)& __m128_op1[0]) = 0xffffc005; +- *((int*)& __m128_op2[3]) = 0xff550025; +- *((int*)& __m128_op2[2]) = 0x002a004b; +- *((int*)& __m128_op2[1]) = 0x00590013; +- *((int*)& __m128_op2[0]) = 0x005cffca; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0xffffc000; +- *((int*)& __m128_result[0]) = 0xffffc005; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fef0000ffff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fef0000ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xde00fe0000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fe010000fe01; +- *((unsigned long*)& __m256i_result[1]) = 0xde00fe0000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fe010000fe01; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_w(__m128i_op0,6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_result[1]) = 0x0000080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0000080800000808; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000007070707; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff07070707; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000007070707; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff07070707; +- __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fef7fef7fef7fef; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fef7fef7fef7fef; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fef7fef7fef7fef; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fef7fef7fef7fef; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xde00fe00; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x0000fe01; +- *((int*)& __m256_op0[4]) = 0x0000fe01; +- *((int*)& __m256_op0[3]) = 0xde00fe00; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x0000fe01; +- *((int*)& __m256_op0[0]) = 0x0000fe01; +- *((int*)& __m256_op1[7]) = 0x0000ffff; +- *((int*)& __m256_op1[6]) 
= 0x0000ffff; +- *((int*)& __m256_op1[5]) = 0x00ff00fe; +- *((int*)& __m256_op1[4]) = 0x00ff00fe; +- *((int*)& __m256_op1[3]) = 0x0000ffff; +- *((int*)& __m256_op1[2]) = 0x0000ffff; +- *((int*)& __m256_op1[1]) = 0x00ff00fe; +- *((int*)& __m256_op1[0]) = 0x00ff00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xde00fe0000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fe010000fe01; +- *((unsigned long*)& __m256i_op0[1]) = 0xde00fe0000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fe010000fe01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe500ffffc085; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffc000ffffc005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001300000012; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001200000012; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbe8282a0793636d3; +- *((unsigned long*)& __m128i_op0[0]) = 0x793636d3793636d3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0202020202020202; +- *((unsigned long*)& __m128i_op0[0]) = 0x363d753d50155c0a; +- *((unsigned long*)& __m128i_op1[1]) = 0xe500c085c000c005; +- *((unsigned long*)& __m128i_op1[0]) = 0xe5c1a185c48004c5; +- *((unsigned long*)& __m128i_result[1]) = 0x0002020002020200; +- *((unsigned long*)& __m128i_result[0]) = 0x021f3b0205150600; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002020002020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x021f3b0205150600; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000300400002; +- *((unsigned long*)& __m128i_op1[0]) = 0x000100010040fffb; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0001000300400002; +- *((unsigned long*)& __m128i_result[0]) = 0x000100010040fffb; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000545400; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000545400; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000fe; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100fe04ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100fe04ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff00ff; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a3a3a3b3a3a3a3a; +- *((unsigned long*)& __m128i_op0[0]) = 0x3a3a00003a3a0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_h(__m128i_op0,-5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op0[0]) = 
0x353c8cc4b1ec5b09; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff00000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8080008000808080; +- *((unsigned long*)& __m128i_result[0]) = 0x1a9e466258f62d84; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff00000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0202020202020202; +- *((unsigned long*)& __m128i_op1[0]) = 0x363d753d50155c0a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff400000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff400000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00fe00fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_result[2]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_result[1]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_result[0]) = 0x007f8080007f007f; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x808080e280808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080636380806363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080638063; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffff040000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffff040000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffff0000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffff0000; +- *((int*)& __m256_op0[4]) = 0xffff0000; +- *((int*)& __m256_op0[3]) = 0xffff0000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffff0000; +- *((int*)& __m256_op0[0]) = 0xffff0000; +- *((int*)& __m256_op1[7]) = 0x007f8080; +- *((int*)& __m256_op1[6]) = 0x007f007f; +- *((int*)& __m256_op1[5]) = 0x007f8080; +- *((int*)& __m256_op1[4]) = 0x007f007f; +- *((int*)& __m256_op1[3]) = 0x007f8080; +- *((int*)& __m256_op1[2]) = 0x007f007f; +- *((int*)& __m256_op1[1]) = 0x007f8080; +- *((int*)& __m256_op1[0]) = 0x007f007f; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x01ef013f01e701f8; +- *((unsigned long*)& __m128i_op1[0]) = 0x35bb8d32b2625c00; +- *((unsigned long*)& __m128i_result[1]) = 0x00008d3200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xea); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x808080e280808080; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x8080636380806363; +- *((unsigned long*)& __m128i_op1[1]) = 0x808080e280808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080636380806363; +- *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080638063; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080638063; +- __m128i_out = __lsx_vbitclri_h(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0xffffffee; +- *((int*)& __m128_op0[0]) = 0x00000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x007f8080; +- *((int*)& __m256_op0[6]) = 0x007f007f; +- *((int*)& __m256_op0[5]) = 0x007f8080; +- *((int*)& __m256_op0[4]) = 0x007f007f; +- *((int*)& __m256_op0[3]) = 0x007f8080; +- *((int*)& __m256_op0[2]) = 0x007f007f; +- *((int*)& __m256_op0[1]) = 0x007f8080; +- *((int*)& __m256_op0[0]) = 0x007f007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007f3f7f007f1f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f3f7f007f1f; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffff00000000ffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffff00000001; +- *((unsigned 
long*)& __m128d_op1[0]) = 0xffffffee00000004; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x80808080; +- *((int*)& __m128_op0[0]) = 0x80638063; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrph_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x03ff000003ff03ff; +- *((unsigned long*)& __m256i_result[2]) = 0x03ff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x03ff000003ff03ff; +- *((unsigned long*)& __m256i_result[0]) = 0x03ff000000000000; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_op1[2]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_op1[1]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_op1[0]) = 0x007f8080007f007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffee00000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x3a3a3a3b3a3a3a3a; +- *((unsigned long*)& __m128i_op1[0]) = 0x3a3a00003a3a0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000003a0000003a; +- __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x38); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0x00000001; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000002; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0x00000001; +- *((int*)& __m256_op1[1]) = 
0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000002; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0x00000001; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000002; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0x00000001; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000002; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfc00ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000100fe000100fe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfc00ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000100fe000100fe; +- *((unsigned long*)& __m256i_result[3]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_result[2]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_result[1]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_result[0]) = 0x00fe00fe00fe00fe; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x1b1b1b1b1b1b1b1b; +- *((unsigned long*)& __m256i_result[2]) = 0x1b1b1b1b1b1b1b1b; +- *((unsigned long*)& __m256i_result[1]) = 0x1b1b1b1b1b1b1b1b; +- *((unsigned long*)& __m256i_result[0]) = 0x1b1b1b1b1b1b1b1b; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; +- __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00008d3200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x09e8e9012fded7fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x479f64b03373df61; +- *((unsigned long*)& __m128i_result[1]) = 0x00008d3200000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00040004; +- *((int*)& __m128_op0[2]) = 0x00040004; +- *((int*)& __m128_op0[1]) = 0x00040004; +- *((int*)& __m128_op0[0]) = 0x00040004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff00000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000fffe; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x09e8e9012fded7fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x479f64b03373df61; +- *((unsigned long*)& __m128i_result[1]) = 0x09e8e9012fded7fd; +- *((unsigned long*)& __m128i_result[0]) = 0x479f64b03373df61; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00040004; +- *((int*)& __m128_op0[2]) = 0x00040004; +- *((int*)& __m128_op0[1]) = 0x00040004; +- *((int*)& __m128_op0[0]) = 0x00040004; +- *((unsigned long*)& __m128d_result[1]) = 0x37c0001000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x37c0001000000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x37c0001000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x37c0001000000001; +- __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0x77c0401040004000; +- *((unsigned long*)& __m128i_result[0]) = 0x77c0401040004000; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_result[1]) = 0x0100000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0100000000000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x36); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; +- __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffff0400; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xffff0400; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff040000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; +- *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000003a0000003a; +- *((unsigned long*)& __m128i_result[1]) = 0x77c0404a4000403a; +- *((unsigned long*)& __m128i_result[0]) = 0x77c03fd640003fc6; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; +- *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; +- *((unsigned long*)& __m128i_result[1]) = 0x75c0404a4200403a; +- 
*((unsigned long*)& __m128i_result[0]) = 0x75c03fd642003fc6; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff00000000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xb9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_op1[1]) = 0x77c0404a4000403a; +- *((unsigned long*)& __m128i_op1[0]) = 0x77c03fd640003fc6; +- *((unsigned long*)& __m128i_result[1]) = 0x04c0044a0400043a; +- *((unsigned long*)& __m128i_result[0]) = 0x04c004d6040004c6; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; +- *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x37c0001000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_result[1]) = 0x0003c853c843c844; +- *((unsigned long*)& __m128i_result[0]) = 0x0003c853c843c844; +- __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x09e8e9012fded7fd; +- *((unsigned long*)& __m128i_op0[0]) = 0x479f64b03373df61; +- *((unsigned long*)& __m128i_op1[1]) = 0x04c0044a0400043a; +- *((unsigned long*)& __m128i_op1[0]) = 0x04c004d6040004c6; +- *((unsigned long*)& __m128i_result[1]) = 0x1d20db00ec967bec; +- *((unsigned long*)& 
__m128i_result[0]) = 0x00890087009b0099; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xff00ffff00000001; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256d_op0[1]) = 0xff00ffff00000001; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x04c0044a0400043a; +- *((unsigned long*)& __m128i_op0[0]) = 0x04c004d6040004c6; +- *((unsigned long*)& __m128i_op1[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_result[1]) = 0x044a043a04d604c6; +- *((unsigned long*)& __m128i_result[0]) = 
0x0004000400040004; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00fe00fe; +- *((int*)& __m256_op0[6]) = 0x00fe00fe; +- *((int*)& __m256_op0[5]) = 0x00fe00fe; +- *((int*)& __m256_op0[4]) = 0x00fe00fe; +- *((int*)& __m256_op0[3]) = 0x00fe00fe; +- *((int*)& __m256_op0[2]) = 0x00fe00fe; +- *((int*)& __m256_op0[1]) = 0x00fe00fe; +- *((int*)& __m256_op0[0]) = 0x00fe00fe; +- *((int*)& __m256_op1[7]) = 0x00fe00fe; +- *((int*)& __m256_op1[6]) = 0x00fe00fe; +- *((int*)& __m256_op1[5]) = 0x00fe00fe; +- *((int*)& __m256_op1[4]) = 0x00fe00fe; +- *((int*)& __m256_op1[3]) = 0x00fe00fe; +- *((int*)& __m256_op1[2]) = 0x00fe00fe; +- *((int*)& __m256_op1[1]) = 0x00fe00fe; +- *((int*)& __m256_op1[0]) = 0x00fe00fe; +- *((int*)& __m256_result[7]) = 0x3f800000; +- *((int*)& __m256_result[6]) = 0x3f800000; +- *((int*)& __m256_result[5]) = 0x3f800000; +- *((int*)& __m256_result[4]) = 0x3f800000; +- *((int*)& __m256_result[3]) = 0x3f800000; +- *((int*)& __m256_result[2]) = 0x3f800000; +- *((int*)& __m256_result[1]) = 0x3f800000; +- *((int*)& __m256_result[0]) = 0x3f800000; +- __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x77c0404a4000403a; +- *((unsigned long*)& __m128i_op0[0]) = 0x77c03fd640003fc6; +- *((unsigned long*)& __m128i_result[1]) = 0x00f0008100800080; +- *((unsigned long*)& __m128i_result[0]) = 0x00f0008000800080; +- __m128i_out = __lsx_vsrari_h(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c844; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c844; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000003a0000003a; +- *((unsigned long*)& __m128d_op1[1]) = 0x37c0001000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x37c0001000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x37c0001000000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x37c0001000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000003a0000003a; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x37c0001000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x37c0001000000008; +- __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_h(__m128i_op0,3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000003a0000003a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000003a0000003a; +- __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = 
__lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x007f0000007f0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x007f0000007f0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00f0008100800080; +- *((unsigned long*)& __m128i_op0[0]) = 0x00f000807000009e; +- *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000ec382e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ec382d; +- __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00f0008100800080; +- *((unsigned long*)& __m128d_op0[0]) = 0x00f000807000009e; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x007f0000007f0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x007f0000007f0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000003f8000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000003f8000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; 
+- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff000000ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000003f8000004; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000003f8000004; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000691a6c843c8fc; +- *((unsigned long*)& __m128i_result[0]) = 0x000691a6918691fc; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_w(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_result[1]) = 0xd6d7ded7ded7defe; +- *((unsigned long*)& __m128i_result[0]) = 0xd6d7ded7ded7defe; +- __m128i_out = __lsx_vori_b(__m128i_op0,0xd6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000020000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000020000000200; +- __m128i_out = __lsx_vfclass_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff000000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001c8520000c97d; +- *((unsigned long*)& __m128i_result[0]) = 0x0001c8520001c87d; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000020000000200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000020000000200; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000020000000200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000020000000200; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000003f8000004; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000003f8000004; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000003f8000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000003f8000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_op1[0]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_result[1]) = 0x0003c853c843c87e; +- *((unsigned long*)& __m128i_result[0]) = 0x0003c853c843c87e; +- __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffff7; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffff7; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_b(__m256i_op0,15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvmskltz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000007f8; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000007f8; +- *((unsigned long*)& __m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; +- *((unsigned long*)& __m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvslei_wu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000007f8; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000007f8; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; +- __m128i_out = __lsx_vsat_hu(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000007f8; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000007f8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000f80007; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000f8; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 
0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x4a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- long_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = 
__lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0006000000040000; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000f80007; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000f80007; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000006c80031; +- __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; 
+- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000006c80031; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_d(__m128i_op0,0x3c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffcfd000000fb00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001fe00f8000700; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0xfdfef9ff0efff900; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0xfdfef9ff0efff900; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffcfd000000fb00; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001fe00f8000700; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000fb01; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000007000000; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fb01; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000007000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000fb01; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000e0000; +- __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128d_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op1[0]) = 0xfdfef9ff0efff900; +- *((unsigned long*)& __m128d_result[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128d_result[0]) = 0x6363636363636363; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x6363636363636363; +- __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000c000c000c000c; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x000000ff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x000000ff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00018d8e00018d8e; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff7; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x807fffff80800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; +- *((unsigned long*)& __m128i_result[1]) = 0x8003000000020000; +- *((unsigned long*)& __m128i_result[0]) = 0x4040ffffc0400004; +- __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8003000000020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4040ffffc0400004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8003000000020000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x64); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000ffff0000ffff; +- __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff00007fff; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256d_op0[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256d_result[2]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256d_result[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256d_result[0]) = 0x00007fff00007fff; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0086000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0082000000000007; +- *((unsigned long*)& __m128d_result[1]) = 0x4160c00000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x4110000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0001; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0086000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0082000000000007; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0086000000040000; +- *((unsigned long*)& __m128i_result[0]) = 0x0082000000000007; +- __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffff0000; +- *((int*)& __m256_op1[4]) = 0xffff0001; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffff0000; +- *((int*)& __m256_op1[0]) = 0xffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000d0000000d; +- __m128i_out = 
__lsx_vmini_w(__m128i_op0,13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000d0000000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x8006000000040000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8002000000000007; +- *((unsigned long*)& __m128i_result[1]) = 0x8006000000040000; +- *((unsigned long*)& __m128i_result[0]) = 0x8002000d00000014; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000006362ffff; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000000000007; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000600000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000636500006363; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x31b1777777777776; +- *((unsigned long*)& __m128i_op0[0]) = 0x6eee282828282829; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000006362ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000d0000000d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x6363635663636356; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000006362ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000d0000000d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000dffff000d; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000000000007; +- *((unsigned long*)& __m128i_op1[1]) = 0x31b1777777777776; +- *((unsigned long*)& __m128i_op1[0]) = 0x6eee282828282829; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; 
+- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000dffff000d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffffff; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x6b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000d0000000d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000dffff000d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000070007; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000007ffff; +- __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,-15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800c00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0002; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0002; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0002; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0002; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_wu(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000068; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001f; +- __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; +- __m256i_out = 
__lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000001f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000070007; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000007ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000068; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000038003; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000040033; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000068; +- *((unsigned long*)& __m128d_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128d_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000068; +- __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_du_wu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0200000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0200000000000000; +- __m256i_out = __lasx_xvssrlni_du_q(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffff00; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffff00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvdiv_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffff00; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffff00; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fefe7f00; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fefe7f00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000038003; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000040033; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_result[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_result[0]) = 0x0020000000200000; +- __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00001fff00001fff; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000038003; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040033; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100080000; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000068; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000038003; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040033; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100080000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffefff80000; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe0000fffe0002; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffe0000fffe0002; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_d(__m128i_op0,12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe0000fffe0002; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe0000fffe0002; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000fffeffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffeffff; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100080000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x80000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandi_b(__m128i_op0,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001e0000001e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001e0000001e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001e0000001e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001e0000001e; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_op1[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0020000000200000; +- long_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_result[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000001e0000001e; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0000001e; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0000001e; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0000001e; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 
0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffeffee; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe0000fffe0012; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffeffee; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe0000fffe0012; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffefffefffeffee; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe0000fffe0012; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffefffefffeffee; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe0000fffe0012; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffeffee; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe0000fffe0012; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffeffee; +- *((unsigned long*)& __m256i_result[0]) = 0xfffe0000fffe0012; +- __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffeffee; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe0000fffe0012; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffeffee; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe0000fffe0012; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000001ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000001ffff; +- __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000001000000010; +- *((unsigned long*)& __m256d_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256d_op1[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0020002000200020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x1010101010001000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x1010101000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xb); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0004000404040404; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0004000400000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1010101010001000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x1010101000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x1010101010001000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x101010100000000e; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op2[0]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_result[1]) = 0x00000f02e1f80f04; +- *((unsigned long*)& __m128i_result[0]) = 0x00000f02e1f80f04; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000; +- __m128i_out = __lsx_vclo_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1010101010001000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x101010100000000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0889088908810881; +- *((unsigned long*)& __m256i_result[2]) = 0x0081010000810100; +- *((unsigned long*)& __m256i_result[1]) = 0x0889088900810088; +- *((unsigned long*)& __m256i_result[0]) = 0x0081010000810100; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000100010001ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000100010001ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000100010001ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000100010001ffff; +- __m256i_out = 
__lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0889088908810881; +- *((unsigned long*)& __m256i_op0[2]) = 0x0081010000810100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0889088900810088; +- *((unsigned long*)& __m256i_op0[0]) = 0x0081010000810100; +- *((unsigned long*)& __m256i_result[3]) = 0x0004448444844084; +- *((unsigned long*)& __m256i_result[2]) = 0x0000408080004080; +- *((unsigned long*)& __m256i_result[1]) = 0x0004448444804080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000408080004080; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00007ff000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007ff000000000; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000100010001ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000100010001ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000100010001ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000100010001ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00007ff000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007ff000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x79); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; 
+- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x000000007ff00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_d(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f8f7f8f800f800; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f780000ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f8f7f80000fff9; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x00003f780000ff80; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f8f7f8f800f800; +- *((unsigned long*)& __m256i_result[2]) = 0x00003f784000ff80; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f8f7f84000fff9; +- *((unsigned long*)& __m256i_result[0]) = 0x00003f784000ff80; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xf7f8f7f8f800f800; +- *((unsigned long*)& __m256d_op1[2]) = 0x00003f784000ff80; +- *((unsigned long*)& __m256d_op1[1]) = 0xf7f8f7f84000fff9; +- *((unsigned long*)& __m256d_op1[0]) = 0x00003f784000ff80; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xff800000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xff800000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f8f7f8f800f800; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f784000ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f8f7f84000fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003f784000ff80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000f7f8f7f8; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000003f78; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000f7f8f7f8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000003f78; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vclz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xff80000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xff80000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x8060000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x8060000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x003fffff00000000; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf7f8f7f8f800f800; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f780000ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0xf7f8f7f80000fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003f780000ff80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00001fff200007ef; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xf7f8f7f8; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00003f78; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xf7f8f7f8; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00003f78; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0xf7f8f7f8; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00003f78; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0xf7f8f7f8; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00003f78; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0xff800000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0xff800000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007ff000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007ff000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0xc); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xaad5555500000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xaad5555500000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00001fff200007ef; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00001fff200007ef; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffff000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff01; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff2; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x1010101010001000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x101010100000000e; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000fe; +- *((unsigned long*)& __m256i_result[2]) = 0xffff01feffff01ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000fe; +- *((unsigned long*)& __m256i_result[0]) = 0xffff01feffff01ff; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8060000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8060000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x805f0000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x805f0000ffffffff; +- __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000f7f8f7f8; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000003f78; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000f7f8f7f8; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003f78; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[2]) = 0x805f0000ffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[0]) = 0x805f0000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000f7f8f7f8; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000003f78; +- *((unsigned long*)& __m256i_result[1]) = 
0x00000000f7f8f7f8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000003f78; +- __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8060000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8060000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00001fff200007ef; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff000000010000; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x805f0000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x805f0000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x805f0000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x805f0000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x80be0000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x80be0000ffffffff; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x80be0000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x80be0000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000100000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff00000000; +- __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x80000000; +- *((int*)& __m256_op1[6]) = 0xff800000; +- *((int*)& __m256_op1[5]) = 0x80000000; +- *((int*)& __m256_op1[4]) = 0x80000000; +- *((int*)& __m256_op1[3]) = 0x80000000; +- *((int*)& __m256_op1[2]) = 0xff800000; +- *((int*)& __m256_op1[1]) = 0x80000000; +- *((int*)& __m256_op1[0]) = 0x80000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000007fc00000400; +- *((unsigned long*)& __m256i_result[2]) = 0x0000040000000400; +- *((unsigned long*)& __m256i_result[1]) = 0x000007fc00000400; +- *((unsigned long*)& __m256i_result[0]) = 0x0000040000000400; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x35); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvpackod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00001fff200007ef; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000f0f0003; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000f1003; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff80be0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000f0f0002; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff80be0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000f1002; +- *((unsigned long*)& __m256i_op1[3]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x80000000ff800000; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xdb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- 
__m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000f0f0003; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000f1003; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000f0001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000011; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000010000fffe; +- *((unsigned long*)& __m256i_result[2]) = 0x000000010000fffe; +- *((unsigned long*)& __m256i_result[1]) = 0x000000010000fffe; +- *((unsigned long*)& __m256i_result[0]) = 0x000000010000fffe; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000003d0000; +- *((unsigned long*)& __m128i_result[0]) = 
0x00000000003d0000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000003f0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffc3ffff003e; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000003f0000ffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffc3ffff003e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000f07f0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffff177fffff0fc; +- __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000003dffc2; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- long_op0 = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000020202020; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000003f0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffc3ffff003e; +- *((unsigned long*)& __m128i_result[1]) = 0x00001f80007fff80; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe1ffff801f7f; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,-15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001084314a6; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001084314a6; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x800000007fff0001; +- *((unsigned long*)& __m256i_result[2]) = 0x80000000ff7f0001; +- *((unsigned long*)& __m256i_result[1]) = 0x800000007fff0001; +- *((unsigned long*)& __m256i_result[0]) = 0x80000000ff7f0001; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0008000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000003f0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffc3ffff003e; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000003dffc2; +- *((unsigned long*)& __m128i_result[1]) = 0xc000000fc0003fff; +- *((unsigned long*)& __m128i_result[0]) = 0xbffffff0ffffc00f; +- __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x00001fff200007ef; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00001fff200007ef; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001f0000001f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000003030000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001f0000001f; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000030400; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000003d0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000003d0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000030000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000030000; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00010000fffe0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00010000fffe0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00010000fffe0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00010000fffe0000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00001fff200007ef; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x80000000; +- *((int*)& __m256_op0[6]) = 0x80000000; +- *((int*)& __m256_op0[5]) = 0x80000000; +- *((int*)& __m256_op0[4]) = 0xff800000; +- *((int*)& __m256_op0[3]) = 0x80000000; +- *((int*)& __m256_op0[2]) = 0x80000000; +- *((int*)& __m256_op0[1]) = 0x80000000; +- *((int*)& __m256_op0[0]) = 0xff800000; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc000000fc0003fff; +- *((unsigned long*)& __m128i_op0[0]) = 0xbffffff0ffffc00f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000003f0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffc3ffff003e; +- *((unsigned long*)& __m128i_result[1]) = 0x00c0000000bfffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ffffff; +- __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x28); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x80000000ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x800000007fff0001; +- *((unsigned long*)& __m256i_op1[2]) = 0x80000000ff7f0001; +- *((unsigned long*)& __m256i_op1[1]) = 0x800000007fff0001; +- *((unsigned long*)& __m256i_op1[0]) = 0x80000000ff7f0001; +- *((unsigned long*)& __m256i_result[3]) = 0xbfffffffffff8000; +- *((unsigned long*)& __m256i_result[2]) = 0xbfff800080000000; +- *((unsigned long*)& __m256i_result[1]) = 0xbfffffffffff8000; +- *((unsigned long*)& __m256i_result[0]) = 0xbfff800080000000; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000001; +- *((int*)& __m128_op0[2]) = 0x084314a6; +- *((int*)& __m128_op0[1]) = 0x00000001; +- *((int*)& __m128_op0[0]) = 0x084314a6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vftintrp_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x800000007fff0001; +- *((unsigned long*)& __m256i_op1[2]) = 0x80000000ff7f0001; +- *((unsigned long*)& __m256i_op1[1]) = 0x800000007fff0001; +- *((unsigned long*)& __m256i_op1[0]) = 0x80000000ff7f0001; +- *((unsigned long*)& __m256i_result[3]) = 0x800000007fff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x80000000ff7f0000; +- *((unsigned long*)& __m256i_result[1]) = 0x800000007fff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x80000000ff7f0000; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000001d; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001d; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6b6c4beb636443e3; +- *((unsigned long*)& __m128i_op1[0]) = 0x0507070805070708; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xbfffffff; +- *((int*)& __m256_op0[6]) = 0xffff8000; +- *((int*)& __m256_op0[5]) = 0xbfff8000; +- *((int*)& __m256_op0[4]) = 0x80000000; +- *((int*)& __m256_op0[3]) = 0xbfffffff; +- *((int*)& __m256_op0[2]) = 0xffff8000; +- *((int*)& __m256_op0[1]) = 0xbfff8000; +- *((int*)& __m256_op0[0]) = 0x80000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0xffff8000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0xffff8000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x0909090909090909; +- *((unsigned long*)& __m128i_result[0]) = 0x0909090909090909; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001d; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x63); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmini_b(__m128i_op0,1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff800000; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; +- __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0808ffff0808ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0808ffff0808ffff; +- __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc0000000c0000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc000000080400000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc0000000c0000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc000000080400000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0002000000020000; +- *((unsigned long*)& __m256i_result[2]) = 0x0002000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000000010000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00020000; +- *((int*)& __m256_op1[6]) = 0x00020000; +- *((int*)& __m256_op1[5]) = 0x00020000; +- *((int*)& __m256_op1[4]) = 0x00010000; +- *((int*)& __m256_op1[3]) = 0x00020000; +- *((int*)& __m256_op1[2]) = 0x00020000; +- *((int*)& __m256_op1[1]) = 0x00020000; +- *((int*)& __m256_op1[0]) = 0x00010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0002000000010000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0002000000010000; +- *((unsigned long*)& __m256d_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; +- __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000a6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_h(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a6; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff59; +- __m128i_out = __lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000a6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080800000808; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x80000000ff800000; +- *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff000200000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000200000000; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffint_d_l(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0909090900000909; +- *((unsigned long*)& __m128i_op1[0]) = 0x0909090909090909; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0xffff000200000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff000200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff020000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff020000; +- __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256d_op0[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256d_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8080000180800001; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x001f00e0; +- *((int*)& __m256_op0[4]) = 0x1f1f1fff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x001f00e0; +- *((int*)& __m256_op0[0]) = 0x1f1f1fff; +- *((int*)& __m256_op1[7]) = 0x80000000; +- *((int*)& __m256_op1[6]) = 0x80000000; +- *((int*)& __m256_op1[5]) = 0x80000000; +- *((int*)& __m256_op1[4]) = 0xff800000; +- *((int*)& __m256_op1[3]) = 0x80000000; +- *((int*)& 
__m256_op1[2]) = 0x80000000; +- *((int*)& __m256_op1[1]) = 0x80000000; +- *((int*)& __m256_op1[0]) = 0xff800000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000001; +- *((int*)& __m256_result[5]) = 0x001f00e0; +- *((int*)& __m256_result[4]) = 0xff800000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000001; +- *((int*)& __m256_result[1]) = 0x001f00e0; +- *((int*)& __m256_result[0]) = 0xff800000; +- __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff59; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff59; +- __m128i_out = __lsx_vbitsel_v(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x08080808; +- *((int*)& __m128_op1[2]) = 0x08080808; +- *((int*)& __m128_op1[1]) = 0x08080808; +- *((int*)& __m128_op1[0]) = 0x08080808; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff000200000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff000200000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x001f00e0ff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x001f00e0ff800000; +- *((unsigned long*)& __m256i_result[3]) = 0xff80000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff000200000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000200000000; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808280808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808280808; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000100fffffeff; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0xb8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 
0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808081; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x80808080ffffffff; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000080800000808; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000080800000808; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffff80800001; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffff80800001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000080800000808; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80800001; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff80800001; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff7fff7ef; +- *((unsigned long*)& __m128i_op1[0]) = 0x80808080ffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffbff8888080a; +- *((unsigned long*)& __m128i_result[0]) = 0x080803ff807ff7f9; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffbff8888080a; +- *((unsigned long*)& __m128i_op0[0]) = 0x080803ff807ff7f9; +- *((unsigned long*)& __m128i_result[1]) = 0x010105017878f8f6; +- *((unsigned long*)& __m128i_result[0]) = 0xf8f8fd0180810907; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1212121212121212; +- *((unsigned long*)& __m256i_result[2]) = 0x1212121212121212; +- *((unsigned long*)& __m256i_result[1]) = 0x1212121212121212; +- *((unsigned long*)& __m256i_result[0]) = 0x1212121212121212; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffff7fff7ef; +- *((unsigned long*)& __m128i_op0[0]) = 0x80808080ffffffff; +- *((int*)& __m128_result[3]) = 0xffffe000; +- *((int*)& __m128_result[2]) = 0xffffe000; +- *((int*)& __m128_result[1]) = 0xc6ffe000; +- *((int*)& __m128_result[0]) = 0xc6fde000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc6ffe000c6fde000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808081; +- *((unsigned long*)& __m128i_result[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_result[0]) = 0x467f6080467d607f; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x010105017878f8f6; +- *((unsigned long*)& __m128i_op2[0]) = 0xf8f8fd0180810907; +- *((unsigned long*)& __m128i_result[1]) = 0x0000080800000808; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080800000808; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) 
= 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x467f6080467d607f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op0[0]) = 0x467f6080467d607f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808081; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xe000e0006080b040; +- __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& 
__m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x467f6080467d607f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffe000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6fde000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xe000e0006080b040; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffe000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c6fde000; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff00ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff00ffffffff; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xef); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffe000ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0xe364525335ede000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000fff00000e36; +- __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x34); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& 
__m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6fde000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x39); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000040000000000; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x2a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000fff00000e36; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000fff0e36; +- __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00fe00ff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x00000fff00000e36; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000fef01000e27ca; +- 
__m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffff00ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffff00ffffffff; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00fe00fe00ff; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000e27ca; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001fde020000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001c4f940000; +- __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00fe00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; +- __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000100000001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0ed5ced7e51023e5; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00001000e51023e5; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_b(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& 
__m256i_result[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; +- __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000e36400015253; +- *((unsigned long*)& __m128i_op0[0]) = 0x000035ed0001e000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000e36400015253; +- *((unsigned long*)& __m128i_op1[0]) = 0x000035ed0001e000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1c6c80007fffffff; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1c6c80007fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0038d800ff000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00fffe00fffffe00; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000f27ca; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000ffef0010000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000fef01000f27ca; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,-4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000ffef0010000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000ff0000; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000ffef0010000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff0000ff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff0000ff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff0000000000; +- __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000ffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_op1[7]) = 0x00ff00ff; +- *((int*)& __m256_op1[6]) = 0x00ff00ff; +- *((int*)& __m256_op1[5]) = 0x00ff00ff; +- *((int*)& __m256_op1[4]) = 0x00ff00ff; +- *((int*)& __m256_op1[3]) = 0x00ff00ff; +- *((int*)& __m256_op1[2]) = 0x00ff00ff; +- *((int*)& __m256_op1[1]) = 0x00ff00ff; +- *((int*)& __m256_op1[0]) = 0x00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000001ffffffff; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x21); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x0000ff00; +- *((int*)& __m128_op1[0]) = 0x00ff0000; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0xffffffff; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000e36400005253; +- *((unsigned long*)& 
__m128i_op2[0]) = 0x000035ed0000e000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x8000008000008080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080800000800080; +- __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000001ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00ff00ff; +- *((int*)& __m256_op0[6]) = 0x00ff00ff; +- *((int*)& __m256_op0[5]) = 0x00ff00ff; +- *((int*)& __m256_op0[4]) = 0x00ff00ff; +- *((int*)& __m256_op0[3]) = 0x00ff00ff; +- *((int*)& __m256_op0[2]) = 0x00ff00ff; +- *((int*)& __m256_op0[1]) = 0x00ff00ff; +- *((int*)& __m256_op0[0]) = 0x00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op2[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op2[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op2[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00ff00ff; +- *((int*)& __m256_op1[6]) = 0x00ff00ff; +- *((int*)& __m256_op1[5]) = 0x00ff00ff; +- *((int*)& __m256_op1[4]) = 0x00ff00ff; +- *((int*)& __m256_op1[3]) = 0x00ff00ff; +- *((int*)& __m256_op1[2]) = 0x00ff00ff; +- *((int*)& __m256_op1[1]) = 0x00ff00ff; +- *((int*)& __m256_op1[0]) = 0x00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0037ffc8d7ff2800; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[1]) = 0x001bffe4ebff9400; +- *((unsigned long*)& __m128i_result[0]) = 0xff80000000000000; +- __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0037ffc8d7ff2800; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00ffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_d(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000e2e3ffffd1d3; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000008000e2e3; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; +- __m128i_out = 
__lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vaddwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000008000e2e3; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000008000e2e3; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080806362; +- *((unsigned long*)& __m128i_result[0]) = 0x807f808000000000; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0038d800ff000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00fffe00fffffe00; +- *((unsigned long*)& __m128i_result[1]) = 0x0038f000ff000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00fffe00fffffe00; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_b(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0038d800ff000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x00fffe00fffffe00; +- *((unsigned long*)& __m128d_op2[1]) = 0x8000008000008080; +- *((unsigned long*)& __m128d_op2[0]) = 0x8080800000800080; +- *((unsigned long*)& __m128d_result[1]) = 0x0000008000008080; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; +- __m256i_out = __lasx_xvfclass_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0037ffc8d7ff2800; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ffffff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0038d800ff000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00fffe00fffffe00; +- *((unsigned long*)& __m128i_result[1]) = 0x0137ffc9d7fe2801; +- *((unsigned long*)& __m128i_result[0]) = 0x7f00ff017fffff01; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00e4880080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0080810080808100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256d_result[3]) = 0x41f0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x41f0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x41f0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x41f0000000000000; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000080806362; +- *((unsigned long*)& __m128i_op1[0]) = 0x807f808000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff80806362; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80806362; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00008080; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00008080; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_h(__m128i_op0,-4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000000ff801c9e; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000810000; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x40eff02383e383e4; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x800000810000807f; +- *((unsigned long*)& __m128i_op0[0]) = 0x808080010080007f; +- *((unsigned long*)& __m128i_op1[1]) = 0x800000810000807f; +- *((unsigned long*)& __m128i_op1[0]) = 0x808080010080007f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000020000020; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x62); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff801c9e; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000810000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_result[1]) = 0x001d001d20000020; +- *((unsigned long*)& __m128i_result[0]) = 0x001d001d20000020; +- __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff801c9e; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000810000; +- *((unsigned long*)& __m128i_op1[1]) = 0x40eff02383e383e4; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000007fff; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000ff801c9e; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000810000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x0000ffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000020000020; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00ff00ff; +- *((int*)& __m128_op0[0]) = 0x00ff00ff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fffe0001; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffe0001; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fffe0001; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffe0001; +- __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000ffff; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x0000ffff; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[0]) 
= 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000700000007; +- *((unsigned long*)& __m256i_result[2]) = 0x0007ffff0007ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000700000007; +- *((unsigned long*)& __m256i_result[0]) = 0x0007ffff0007ffff; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x2d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000200000002000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000700000007; +- *((unsigned long*)& __m256i_op0[2]) = 0x0007ffff0007ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000700000007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0007ffff0007ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000700000007; +- *((unsigned long*)& __m256i_result[2]) = 0x00071f1f00071f1f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000700000007; +- *((unsigned long*)& __m256i_result[0]) = 0x00071f1f00071f1f; +- __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000020000020; +- *((unsigned long*)& __m128i_result[1]) = 0x2000002000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x2000002020000020; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000020006; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe000ffdf; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000200000002000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffe000ffdf; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_h(__m128i_op0,0x0); +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& 
__m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe000ffdf; +- *((unsigned long*)& __m128i_result[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe000ffdf; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffe000ffdf; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe000ffdf; +- *((unsigned long*)& __m128i_result[1]) = 0x0000200000002001; +- *((unsigned long*)& __m128i_result[0]) = 0x000000001fff0021; +- __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = 
__lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001200100012001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100200001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100200001; +- *((unsigned long*)& __m128i_op1[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x3a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001200100012001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000080000000800; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000000007ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000000007ffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000000007ffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000000007ffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0e0d0c0b0e0d0c0b; +- *((unsigned long*)& __m256i_op0[2]) = 0x0e0d0c0b0e0d0c0b; +- *((unsigned long*)& __m256i_op0[1]) = 0x0e0d0c0b0e0d0c0b; +- *((unsigned long*)& __m256i_op0[0]) = 0x0e0d0c0b0e0d0c0b; +- *((unsigned long*)& __m256i_result[3]) = 0x0a0908070a090807; +- *((unsigned long*)& __m256i_result[2]) = 0x0a0908070a090807; +- *((unsigned long*)& __m256i_result[1]) = 0x0a0908070a090807; +- *((unsigned long*)& __m256i_result[0]) = 0x0a0908070a090807; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0400400204004002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000080000000800; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000080000000800; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0400400204004002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000080000000800; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7171717171717171; +- *((unsigned long*)& __m256i_result[2]) = 0x8e8e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_result[1]) = 0x7171717171717171; +- *((unsigned long*)& __m256i_result[0]) = 0x8e8e8e8e8e8e8e8e; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0x71); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0400400204004002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000002002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x6d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000; +- __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x008e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x008e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000700000007; +- *((unsigned long*)& __m256i_op1[2]) = 0x0007ffff0007ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000700000007; +- *((unsigned long*)& __m256i_op1[0]) = 0x0007ffff0007ffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x008e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x008e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007000008e700000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007000008e700000; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001200100012001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00001fff00001fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00001fff00001fff; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7171717171717171; +- *((unsigned long*)& __m256i_op0[2]) = 0x8e8e8e8e8f0e8e8e; +- *((unsigned long*)& __m256i_op0[1]) = 0x7171717171717171; +- *((unsigned long*)& __m256i_op0[0]) = 0x8e8e8e8e8f0e8e8e; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000007ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000007ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000007ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000007ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7171717171010101; +- *((unsigned long*)& __m256i_result[2]) = 0x8e8e8e8e8f00ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7171717171010101; +- *((unsigned long*)& __m256i_result[0]) = 0x8e8e8e8e8f00ffff; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7171717171717171; +- *((unsigned long*)& __m256i_op1[2]) = 0x8e8e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_op1[1]) = 0x7171717171717171; +- *((unsigned long*)& __m256i_op1[0]) = 0x8e8e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00001fff00001fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000007ffc000; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00001fff; +- *((int*)& __m128_op0[2]) = 0x00001fff; +- *((int*)& __m128_op0[1]) = 0x00000003; +- *((int*)& __m128_op0[0]) = 0xfffffffc; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0xfffffffc; +- __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000e2e20000e2e2; +- *((unsigned long*)& __m256i_op0[2]) = 0x00011d1c00011d9c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000e2e20000e2e2; +- *((unsigned long*)& __m256i_op0[0]) = 0x00011d1c00011d9c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000e2e20000e2e2; +- *((unsigned long*)& __m256i_op1[2]) = 0x00011d1c00011d9c; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000e2e20000e2e2; +- *((unsigned long*)& __m256i_op1[0]) = 0x00011d1c00011d9c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7171717171717171; +- *((unsigned long*)& __m256i_op0[2]) = 0x8e8e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_op0[1]) = 0x7171717171717171; +- *((unsigned long*)& __m256i_op0[0]) = 0x8e8e8e8e8e8e8e8e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x01c601c6fe3afe3a; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x01c601c6fe3afe3a; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x16); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffe080f6efc100f7; +- *((unsigned long*)& __m128i_op1[0]) = 0xefd32176ffe100f7; +- *((unsigned long*)& __m128i_result[1]) = 0x0000040000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000040000000000; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x71717171; +- *((int*)& __m256_op1[6]) = 0x71010101; +- *((int*)& __m256_op1[5]) = 0x8e8e8e8e; +- *((int*)& __m256_op1[4]) = 0x8f00ffff; +- *((int*)& __m256_op1[3]) = 0x71717171; +- *((int*)& __m256_op1[2]) = 0x71010101; +- *((int*)& __m256_op1[1]) = 0x8e8e8e8e; +- *((int*)& __m256_op1[0]) = 0x8f00ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7c007c0080008000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7c007c0080008000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x71717171; +- *((int*)& __m256_op0[6]) = 0x71010101; +- *((int*)& __m256_op0[5]) = 0x8e8e8e8e; +- *((int*)& __m256_op0[4]) = 0x8f00ffff; +- *((int*)& __m256_op0[3]) = 0x71717171; +- *((int*)& __m256_op0[2]) = 0x71010101; +- *((int*)& 
__m256_op0[1]) = 0x8e8e8e8e; +- *((int*)& __m256_op0[0]) = 0x8f00ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x01c601c6fe3afe3a; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x01c601c6fe3afe3a; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007000008e700000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007000008e700000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7171717171010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x8e8e8e8e8f00ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7171717171010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x8e8e8e8e8f00ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[2]) = 0xe2e2e202ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[0]) = 0xe2e2e202ffffffff; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000007ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000007ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000007ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000007ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001e0007ffff; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffe080f6efc100f7; +- *((unsigned long*)& __m128i_op0[0]) = 0xefd32176ffe100f7; +- int_result = 0x0000000000002176; +- int_out = __lsx_vpickve2gr_h(__m128i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffc6ffc6; +- *((int*)& __m256_op0[6]) = 0x003a003a; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffc6ffc6; +- *((int*)& __m256_op0[2]) = 0x003a003a; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x71717171; +- *((int*)& __m256_op1[6]) = 0x71010101; +- *((int*)& __m256_op1[5]) = 0x8e8e8e8e; +- *((int*)& __m256_op1[4]) = 0x8f00ffff; +- *((int*)& __m256_op1[3]) = 0x71717171; +- *((int*)& __m256_op1[2]) = 0x71010101; +- *((int*)& __m256_op1[1]) = 0x8e8e8e8e; +- *((int*)& __m256_op1[0]) = 0x8f00ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2a29282726252423; +- *((unsigned long*)& __m128i_op0[0]) = 0x2221201f1e1d1c1b; +- *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; +- *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000005452505; +- *((unsigned long*)& __m128i_result[0]) = 0x00000004442403e4; +- __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_op0[2]) = 0xe2e2e202ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_op0[0]) = 0xe2e2e202ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000465; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000465; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; +- *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = 
__lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x26); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000465; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000465; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000008d00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000008d00000000; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000002002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x2a29282726252423; +- *((unsigned long*)& __m128i_op1[0]) = 0x2221201f1e1d1c1b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00a8009800880078; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x07ffc000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffe080f6efc100f7; +- *((unsigned long*)& __m128i_op0[0]) = 0xefd32176ffe100f7; +- *((unsigned long*)& __m128i_op1[1]) = 0xffe080f6efc100f7; +- *((unsigned long*)& __m128i_op1[0]) = 0xefd32176ffe100f7; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x2c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0xffe37fe3001d001d; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8000; +- *((unsigned long*)& __m256i_result[1]) = 0xffe37fe3001d001d; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff8000; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffc6ffc6003a003a; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fe37fff001fffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fe37fff001fffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fffffff; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000465; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000465; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000465; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000465; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvsrar_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[2]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[0]) = 0x7575757575757575; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0x75); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7c007c0080008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7c007c0080008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7c00000880008000; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2a29282726252423; +- *((unsigned long*)& __m128i_op0[0]) = 0x2221201f1e1d1c1b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,-1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 
0x0000001e0007ffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000001e0007ffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffe37fe3001d001d; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffff8000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffe37fe3001d001d; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffe200000020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffe200000020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[3]) = 0x7575ffff75757595; +- *((unsigned long*)& __m256i_result[2]) = 0x7575ffff7575f575; +- *((unsigned long*)& __m256i_result[1]) = 0x7575ffff75757595; +- *((unsigned long*)& __m256i_result[0]) = 0x7575ffff7575f575; +- __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7575ffff75757595; +- *((unsigned long*)& __m256i_op0[2]) = 0x7575ffff7575f575; +- *((unsigned long*)& __m256i_op0[1]) = 0x7575ffff75757595; +- *((unsigned long*)& __m256i_op0[0]) = 0x7575ffff7575f575; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x3aadec4f6c7975b1; +- *((unsigned long*)& __m256i_result[2]) = 0x3abac5447fffca89; +- *((unsigned long*)& __m256i_result[1]) = 0x3aadec4f6c7975b1; +- *((unsigned long*)& __m256i_result[0]) = 0x3abac5447fffca89; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00a600e000a600e0; +- *((unsigned long*)& __m128i_op1[0]) = 0x01500178010000f8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7575ffff75757595; +- *((unsigned long*)& __m256i_op0[2]) = 0x7575ffff7575f575; +- *((unsigned long*)& __m256i_op0[1]) = 0x7575ffff75757595; +- *((unsigned long*)& __m256i_op0[0]) = 0x7575ffff7575f575; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& 
__m256i_op2[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op2[2]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op2[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op2[0]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; +- __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x001d001d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x05452505; +- *((int*)& __m128_op0[1]) = 0x00000004; +- *((int*)& __m128_op0[0]) = 0x442403e4; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[2]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[0]) = 0x7575757575757575; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000; +- __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0x22); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000fff0; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_hu(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000001d001d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x3e00000440004000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x3e000004400f400f; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x05452505; +- *((int*)& __m128_op1[1]) = 0x00000004; +- *((int*)& __m128_op1[0]) = 0x442403e4; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000001000000010; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x001d001d; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x001d001d; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000003; +- *((unsigned long*)& 
__m256d_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010003; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010003; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0102; +- *((unsigned long*)& __m256i_result[2]) = 0x007c000000810081; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0102; +- *((unsigned long*)& __m256i_result[0]) = 0x007c000000810081; +- __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000fff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000001d001d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000001d001d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000030003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000030003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x7fe37fe3; +- *((int*)& __m256_op0[6]) = 0x001d001d; +- *((int*)& __m256_op0[5]) = 0x7fff7fff; +- *((int*)& __m256_op0[4]) = 0x7fff0000; +- *((int*)& __m256_op0[3]) = 0x7fe37fe3; +- *((int*)& __m256_op0[2]) = 0x001d001d; +- *((int*)& __m256_op0[1]) = 0x7fff7fff; +- *((int*)& __m256_op0[0]) = 0x7fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
+- +- *((unsigned long*)& __m256i_op0[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op0[2]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op0[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op0[0]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0102; +- *((unsigned long*)& __m256i_op0[2]) = 0x007c000000810081; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0102; +- *((unsigned long*)& __m256i_op0[0]) = 0x007c000000810081; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fe37fe3001d001d; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x007c7fff00007fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00817fff00810000; +- *((unsigned long*)& __m256i_result[1]) = 0x007c7fff00007fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00817fff00810000; +- __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffe8ffffffe8; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffe8ffffffe8; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffe8ffffffe8; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffe8ffffffe8; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010109; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- long_int_result = 0x0000000000000000; +- long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffe0; +- __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3aadec4f6c7975b1; +- *((unsigned long*)& __m256i_op0[2]) = 0x3abac5447fffca89; +- *((unsigned long*)& __m256i_op0[1]) = 0x3aadec4f6c7975b1; +- *((unsigned long*)& __m256i_op0[0]) = 0x3abac5447fffca89; +- *((unsigned long*)& __m256i_op1[3]) = 0x3aadec4f6c7975b1; +- *((unsigned long*)& __m256i_op1[2]) = 0x3abac5447fffca89; +- *((unsigned long*)& __m256i_op1[1]) = 0x3aadec4f6c7975b1; +- *((unsigned long*)& __m256i_op1[0]) = 0x3abac5447fffca89; +- *((unsigned long*)& __m256i_result[3]) = 0x0000755a0000d8f2; +- *((unsigned long*)& __m256i_result[2]) = 0x000075740000fffe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000755a0000d8f2; +- *((unsigned long*)& __m256i_result[0]) = 0x000075740000fffe; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0102; +- *((unsigned long*)& __m256i_op0[2]) = 0x007c000000810081; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0102; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x007c000000810081; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000005452505; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000004442403e4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffe0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000005452505; +- *((unsigned long*)& __m128i_result[0]) = 0x000000044525043c; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffe0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000005452505; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000044525043c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x7c00000880008000; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7c00000880008000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0100000001000100; +- __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0100000001000100; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0100000001000100; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x12835580; +- *((int*)& __m128_op0[0]) = 0xb880eb98; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0x55fcbad1; +- *((int*)& __m128_result[0]) = 0x7fc00000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x3abb3abbbabababa; +- *((unsigned long*)& __m256i_result[2]) = 0x0080000000800080; +- *((unsigned long*)& __m256i_result[1]) = 0x3abb3abbbabababa; +- *((unsigned long*)& __m256i_result[0]) = 0x0080000000800080; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0100000001000100; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000040; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000040; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_result[3]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_result[1]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000180007fe8; +- __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffe8ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffe8ffffffe8; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffe8ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 
0xffffffe8ffffffe8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000005452505; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000004442403e4; +- *((unsigned long*)& __m128i_op1[1]) = 0x03fc03fc03fc03fc; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000b4a00008808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080800000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x42800000; +- *((int*)& __m128_result[0]) = 0x42800000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x7c00000880008000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x7c00000880008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffffff; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000001; +- *((int*)& __m256_op0[6]) = 0x7bfffff0; +- *((int*)& __m256_op0[5]) = 0x00000001; +- *((int*)& __m256_op0[4]) = 0x80007fe8; +- *((int*)& __m256_op0[3]) = 0x00000001; +- *((int*)& __m256_op0[2]) = 0x7bfffff0; +- *((int*)& __m256_op0[1]) = 0x00000001; +- *((int*)& __m256_op0[0]) = 0x80007fe8; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000017bfffff0; +- *((unsigned long*)& 
__m256d_op0[0]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x0100000001000100; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x0100000001000100; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000017bfffff0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000180007fe8; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff7bfffff1; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff80007fe9; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff7bfffff1; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff80007fe9; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000004000000040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000b4a00008808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080800000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000b4a00008808; +- __m128i_out = __lsx_vexth_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4280000042800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xbd7fffffbd800000; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000c0007; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000c0007; +- *((unsigned long*)& __m256i_op1[3]) = 0x3abb3abbbabababa; +- *((unsigned long*)& __m256i_op1[2]) = 0x0080000000800080; +- *((unsigned long*)& __m256i_op1[1]) = 0x3abb3abbbabababa; +- *((unsigned long*)& __m256i_op1[0]) = 0x0080000000800080; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000babababa; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000008c0087; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000babababa; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000008c0087; +- __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000b4a00008808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080800000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000001d001d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001d0000001d; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x007c7fff00007fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00817fff00810000; +- *((unsigned long*)& __m256i_op0[1]) = 0x007c7fff00007fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00817fff00810000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x7c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001b4a00007808; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808; +- *((unsigned long*)& __m128i_result[1]) = 0x00001b4a00007808; +- *((unsigned long*)& __m128i_result[0]) = 0x00001b4a00007808; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe4b5ffff87f8; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001d0000001d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00001d0000001d00; +- __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000fff; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00001b4a00007808; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x5252525252525252; +- *((unsigned long*)& __m256i_result[2]) = 0x5252525252525252; +- *((unsigned long*)& __m256i_result[1]) = 0x5252525252525252; +- *((unsigned long*)& __m256i_result[0]) = 0x5252525252525252; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x52); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff01; +- __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001000; 
+- int_op1 = 0x000000007ff00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000001000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; +- __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00001b4a00007808; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffff01; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffff01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x807c7fffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x80817fff00810000; +- *((unsigned long*)& __m256i_op0[1]) = 0x807c7fffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x80817fff00810000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x80767f0101050101; +- *((unsigned long*)& __m256i_result[2]) = 0x80817f01007f0000; +- *((unsigned long*)& __m256i_result[1]) = 0x80767f0101050101; +- *((unsigned long*)& __m256i_result[0]) = 0x80817f01007f0000; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[1]) = 
0x4040404040404040; +- *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; +- __m256i_out = __lasx_xvreplve0_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[3]) = 0x5252525252525252; +- *((unsigned long*)& __m256i_op1[2]) = 0x5252525252525252; +- *((unsigned long*)& __m256i_op1[1]) = 0x5252525252525252; +- *((unsigned long*)& __m256i_op1[0]) = 0x5252525252525252; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff7bfffff1; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff80007fe9; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff7bfffff1; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff80007fe9; +- *((unsigned long*)& __m256i_result[3]) = 0x40ff40ff40ff40ff; +- *((unsigned long*)& __m256i_result[2]) = 0x407b40ff40ff40f1; +- *((unsigned long*)& __m256i_result[1]) = 0x40ff40ff40ff40ff; +- *((unsigned long*)& __m256i_result[0]) = 0x407b40ff40ff40f1; +- __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x40ff40ff40ff40ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x407b40ff40ff40f1; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x40ff40ff40ff40ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x407b40ff40ff40f1; +- *((unsigned long*)& __m256i_op1[3]) = 0x40ff40ff40ff40ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x407b40ff40ff40f1; +- *((unsigned long*)& __m256i_op1[1]) = 0x40ff40ff40ff40ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x407b40ff40ff40f1; +- *((unsigned long*)& __m256i_result[3]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_result[2]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_result[1]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_result[0]) = 0xbf84bf00bf00bf0e; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00060000; +- *((int*)& __m256_op0[6]) = 0x00040000; +- *((int*)& __m256_op0[5]) = 0x00020000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00060000; +- *((int*)& __m256_op0[2]) = 0x00040000; +- *((int*)& __m256_op0[1]) = 0x00020000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256d_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256d_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256d_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256d_op1[3]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0006000000040000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00bf00bf00bf00bf; +- *((unsigned long*)& __m256i_op0[2]) = 0x00bf00bf00bf00bf; +- *((unsigned long*)& __m256i_op0[1]) = 0x00bf00bf00bf00bf; +- *((unsigned long*)& __m256i_op0[0]) = 0x00bf00bf00bf00bf; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xdf80df80df80df80; +- *((unsigned long*)& __m256i_result[2]) = 0xdfc2df80df80df87; +- *((unsigned long*)& __m256i_result[1]) = 0xdf80df80df80df80; +- *((unsigned long*)& __m256i_result[0]) = 0xdfc2df80df80df87; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128d_op0[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_q(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_op1[3]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_op1[2]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_op1[1]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_op1[0]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x9090909090909090; +- *((unsigned long*)& __m128i_result[0]) = 0x9090909090909090; +- __m128i_out = __lsx_vxori_b(__m128i_op0,0x90); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_result[0]) = 
0xfffdfffdfffdfffd; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_op1[2]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000e0e0e0e0; +- *((unsigned long*)& __m256i_op1[0]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff4; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000e0e0e0e0; +- *((unsigned long*)& __m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000e0e0e0e0; +- *((unsigned long*)& __m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_op1[3]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000070007000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_result[3]) = 0xe070e000e070e000; +- *((unsigned long*)& __m256i_result[2]) = 0xe070e000e070e000; +- *((unsigned long*)& __m256i_result[1]) = 0xe070e000e070e000; +- *((unsigned long*)& __m256i_result[0]) = 0xe070e000e070e000; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vextrins_d(__m128i_op0,__m128i_op1,0x74); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_d(__m128i_op0,-16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f0040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f0040; +- *((unsigned long*)& __m256i_result[3]) = 0x00003f003f003f00; +- *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00003f003f003f00; +- *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000070007000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4040403fd03fd040; +- *((unsigned long*)& __m256i_op1[2]) = 0x4040403fd03fd040; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffd03fd040; +- *((unsigned long*)& __m256i_op1[0]) = 0x4040403fd03fd040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001010000010100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000010100; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256d_op0[2]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256d_op0[1]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256d_op0[0]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7000700070007000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000070007000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff8fff9000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff8fff9000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff8fff9000; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000070007000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7000700070007000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0e0e0e0e0e0e0e0e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000e0e0e0e0e0e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x003f003f003f0040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x003f003f003f0040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x003f003f003f0040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x003f003f003f0040; +- *((unsigned long*)& __m256i_result[3]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_result[2]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_result[1]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_result[0]) = 0x00003f3f00004040; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfefbff06fffa0004; +- *((unsigned long*)& __m128i_op1[0]) = 0xfefeff04fffd0004; +- *((unsigned long*)& __m128i_result[1]) = 0x4008804080040110; +- *((unsigned long*)& __m128i_result[0]) = 0x4040801080200110; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x41000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x41000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x41000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x41000000; +- __m256_out = __lasx_xvffint_s_wu(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfff0000ffff0000f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffff0000; +- *((int*)& __m128_op0[2]) = 0xffff0000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x40088040; +- *((int*)& __m128_op1[2]) = 0x80040110; +- *((int*)& __m128_op1[1]) = 0x40408010; +- *((int*)& __m128_op1[0]) = 0x80200110; +- *((int*)& __m128_result[3]) = 0xffff0000; +- *((int*)& __m128_result[2]) = 0xffff0000; +- *((int*)& __m128_result[1]) = 0x40408010; +- *((int*)& __m128_result[0]) = 0x80200110; +- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0e0e0e0e0e0e0e0e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000e0e0e0e0e0e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff8fff9000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff8fff9000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff8fff9000; +- *((unsigned long*)& __m256i_result[3]) = 0x00010e0d00009e0e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00009000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000e0e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00009000; +- __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_op0[2]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_op0[1]) = 0xbf00bf00bf00bf00; +- *((unsigned long*)& __m256i_op0[0]) = 0xbf84bf00bf00bf0e; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_op1[2]) = 
0x00003f3f00004040; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_result[3]) = 0xdf80ff20df80ff20; +- *((unsigned long*)& __m256i_result[2]) = 0xdfc2ff20df80ffa7; +- *((unsigned long*)& __m256i_result[1]) = 0xdf80ff20df80ff20; +- *((unsigned long*)& __m256i_result[0]) = 0xdfc2ff20df80ffa7; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00003f3f00004040; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_b(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdf80df80df80df80; +- *((unsigned long*)& __m256i_op0[2]) = 0xdfc2df80df80df87; +- *((unsigned long*)& __m256i_op0[1]) = 0xdf80df80df80df80; +- *((unsigned long*)& __m256i_op0[0]) = 0xdfc2df80df80df87; +- *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80df80; +- *((unsigned long*)& __m256i_op1[2]) = 0xdfc2df80df80df87; +- *((unsigned long*)& __m256i_op1[1]) = 0xdf80df80df80df80; +- *((unsigned long*)& __m256i_op1[0]) = 0xdfc2df80df80df87; +- *((unsigned long*)& __m256i_result[3]) = 0x2080208020802080; +- *((unsigned long*)& __m256i_result[2]) = 0x203e208020802079; +- *((unsigned long*)& __m256i_result[1]) = 0x2080208020802080; +- *((unsigned long*)& __m256i_result[0]) = 0x203e208020802079; +- __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdf80ff20df80ff20; +- *((unsigned long*)& __m256i_op0[2]) = 0xdfc2ff20df80ffa7; +- *((unsigned long*)& __m256i_op0[1]) = 0xdf80ff20df80ff20; +- *((unsigned long*)& __m256i_op0[0]) = 0xdfc2ff20df80ffa7; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x80208020c22080a7; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x80208020c22080a7; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x80208020c22080a7; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x80208020c22080a7; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xdf80ff20df80ff20; +- *((unsigned long*)& __m256i_op1[2]) = 0xdfc2ff20df80ffa7; +- *((unsigned long*)& __m256i_op1[1]) = 0xdf80ff20df80ff20; +- *((unsigned long*)& __m256i_op1[0]) = 0xdfc2ff20df80ffa7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000840100000000; +- *((unsigned long*)& __m256i_result[2]) = 0xbffebffec0febfff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000840100000000; +- *((unsigned long*)& __m256i_result[0]) = 0xbffebffec0febfff; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffff0000; +- *((int*)& __m128_op0[2]) = 0xffff0000; +- *((int*)& __m128_op0[1]) = 0x40408010; +- *((int*)& __m128_op0[0]) = 0x80200110; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff4; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000840100000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xbffebffec0fe0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000840100000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xbffebffec0fe0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000420080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000420080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x5fff5fff607f0000; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000; +- 
*((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000033; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00004200; +- *((int*)& __m256_op0[6]) = 0x80000000; +- *((int*)& __m256_op0[5]) = 0x5fff5fff; +- *((int*)& __m256_op0[4]) = 0x607f0000; +- *((int*)& __m256_op0[3]) = 0x00004200; +- *((int*)& __m256_op0[2]) = 0x80000000; +- *((int*)& __m256_op0[1]) = 0x5fff5fff; +- *((int*)& __m256_op0[0]) = 0x607f0000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00004200; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x5fff5fff; +- *((int*)& __m256_result[4]) = 0x607f0000; +- *((int*)& __m256_result[3]) = 0x00004200; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x5fff5fff; +- *((int*)& __m256_result[0]) = 0x607f0000; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffc0c0ffffbfc0; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffc0c0ffffbfc0; +- __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003f3f0000400d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00003f3f0000400d; +- __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00010e0d00009e0e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00009000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000e0e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00009000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000033; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x71); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f801fe000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x3fc03fc000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f801fe000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe05f8102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe05f8102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffe05f8102; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffe05f8102; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000001607f0000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000001607f0000; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000033; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffbdff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xa000a0009f80ffcc; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffbdff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xa000a0009f80ffcc; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000033; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 
0x00000033; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000003; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f7f1fd800000004; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000004; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000033; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000033; +- *((int*)& __m256_op1[7]) = 0x00004200; +- *((int*)& __m256_op1[6]) = 0x80000000; +- *((int*)& __m256_op1[5]) = 0x5fff5fff; +- *((int*)& __m256_op1[4]) = 0x607f0000; +- *((int*)& __m256_op1[3]) = 0x00004200; +- *((int*)& __m256_op1[2]) = 0x80000000; +- *((int*)& __m256_op1[1]) = 0x5fff5fff; +- *((int*)& __m256_op1[0]) = 0x607f0000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f00004040; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffe05f8102; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffe05f8102; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffc0c0ffffbfc0; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffc0c0ffffbfc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003f3f0000400d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003f3f0000400d; +- *((unsigned long*)& __m256i_result[3]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x44); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000900000009; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3fc03fc000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f801fe000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x7f801fe000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000004; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffc03fc040; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffe00000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffe00000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f801fe000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x3fc03fc000000003; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f7f1fd800000004; +- *((unsigned long*)& __m128i_result[1]) = 0x7f1f00003f3f0000; +- *((unsigned long*)& __m128i_result[0]) = 0x3f3f00007f1f0000; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003f3f0000400d; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003f3f0000400d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x3fc03fc000000003; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f7f1fd800000004; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xc0411fe800000000; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f801fe000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fc03fc000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x7f801fdfffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x3fc03fc000000003; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x3f413f4100000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x7f801fe000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000017fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000420080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000420080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000420080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000420080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op2[3]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x1000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000420080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000420080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x5fff5fff607f0000; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x28); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe05f8102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe05f8102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000420080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x5fff5fff607f0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f801fe000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3fc03fc000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x00000003fc00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001fe01fe00; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000003fc00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001fe01fe00; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000a; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000a; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x10000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x10000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffff0000; +- *((int*)& __m128_op1[2]) = 0xffff0000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmina_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000000a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000a; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x3f413f4100000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f801fe000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xc0411fe800000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 
0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x601fbfbeffffffff; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbf3efff536d5169b; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ebdfffffddf3f40; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3f5ec0a0feefa0b0; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80df80; +- *((unsigned long*)& __m256i_op1[2]) = 0xdfc2df80df80df87; +- *((unsigned long*)& __m256i_op1[1]) = 0xdf80df80df80df80; +- *((unsigned long*)& __m256i_op1[0]) = 0xdfc2df80df80df87; +- *((unsigned long*)& __m256i_result[3]) = 0xff21ff21ff21ff21; +- *((unsigned long*)& __m256i_result[2]) = 0xff21ff21ff21ff21; +- *((unsigned long*)& __m256i_result[1]) = 0xff21ff21ff21ff21; +- *((unsigned long*)& __m256i_result[0]) = 0xff21ff21ff21ff21; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffe00000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffe00000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffff00000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffff00000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffe00029f9f6061; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x64e464e464e464e4; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffeffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000064e264e6; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffe00029f9f6061; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x601fbfbeffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fff7fff; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000900000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; +- __m256i_out = __lasx_xvmini_d(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xff21ff21ff21ff21; +- *((unsigned long*)& __m256d_op0[2]) = 0xff21ff21ff21ff21; +- *((unsigned long*)& __m256d_op0[1]) = 0xff21ff21ff21ff21; +- *((unsigned long*)& __m256d_op0[0]) = 0xff21ff21ff21ff21; +- *((unsigned long*)& __m256d_op1[3]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256d_op1[2]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256d_op1[1]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256d_op1[0]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; +- __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_op1[2]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_op1[1]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_op1[0]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0100010001000100; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x01000100; +- *((int*)& __m128_op0[0]) = 0x01000100; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x64e464e4; +- *((int*)& __m128_op1[0]) = 0x64e464e4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; +- __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_op0[2]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_op0[1]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_op0[0]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[3]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_result[2]) = 0xff21c241ff21c238; +- *((unsigned long*)& __m256i_result[1]) = 0xff21c241ff21c241; +- *((unsigned long*)& __m256i_result[0]) = 0xff21c241ff21c238; +- __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100010001000100; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvsrai_h(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3f5ec0a0feefa0b0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ff02d060; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff02d060; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ff02d060; +- __m128i_out = __lsx_vrotri_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff02d060; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff02d060; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff02d06000000000; +- __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_du_wu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffe00029f9f6061; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3f5ec0a0feefa0b0; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffe00029fb060b1; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x3fff3fff3fff3fff; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m128d_op2[1]) = 0xfffb00fdfdf7ffff; +- *((unsigned long*)& __m128d_op2[0]) = 0xfff8000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xfffb00fdfdf7ffff; +- *((unsigned long*)& __m128d_result[0]) = 0xfff8000000000000; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffb00fdfdf7ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff8000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0100010001000100; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00fe000100cf005f; +- *((unsigned long*)& __m128d_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128d_result[1]) = 0x5f675e96e29a5a60; +- *((unsigned long*)& __m128d_result[0]) = 0x7fff7fff7fff7fff; +- __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- int_op0 = 0x000000007ff00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00fe000100cf005f; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x5f675e96e29a5a60; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x5fff5e97e2ff5abf; +- *((unsigned long*)& 
__m128i_result[0]) = 0xfefffefffefffeff; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000009; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x26); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); 
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5f675e96e29a5a60; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x965f5e9660e25a60; +- *((unsigned long*)& __m128i_result[0]) = 0xff7f7fffff7f7fff; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x34); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00ff000100ff00fe; +- *((unsigned long*)& __m128d_op0[0]) = 0x00ff003000ff00a0; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfrint_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000011; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5f675e96e29a5a60; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x5e695e95e1cb5a01; +- __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000011; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000088; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000088; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000088; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000088; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000009; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000009; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000009; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x5e695e95e1cb5a01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x5f675e96; +- *((int*)& __m128_op0[2]) = 0xe29a5a60; +- *((int*)& __m128_op0[1]) = 0x7fff7fff; +- *((int*)& __m128_op0[0]) = 0x7fff7fff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x5e695e95; +- *((int*)& __m128_op1[0]) = 0xe1cb5a01; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000005e695e95; +- *((unsigned long*)& __m128i_op0[0]) = 0x5e695e96c396b402; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000005e94; +- *((unsigned long*)& __m128i_result[0]) = 0x00005e96ffffb402; +- __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000005e94; +- *((unsigned long*)& __m128i_op0[0]) = 0x00005e96ffffb402; +- *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000000bd; +- *((unsigned long*)& __m128i_result[0]) = 0x0001fc0000fffeff; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- long_op0 = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff000100ff00fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff003000ff00a0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff000100ff00fe; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff003000ff00a0; +- __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000020006; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000020006; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000020006; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000020006; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; +- __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000c; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff00; +- __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00ff000100ff00fe; +- *((unsigned long*)& __m128d_op0[0]) = 0x00ff003000ff00a0; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000005e695e95; +- *((unsigned long*)& __m128d_op1[0]) = 0x5e695e96c396b402; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2ea268972ea2966a; +- *((unsigned long*)& __m128i_op0[0]) = 0x4026f4ffbc175bff; +- *((unsigned long*)& __m128i_op1[1]) = 
0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x5d7f5d807fea807f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f017ffd; +- *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f017ffd; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff81ff7d; +- __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff0001; +- __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000005e695e95; +- *((unsigned long*)& __m128i_op0[0]) = 0x5e695e96c396b402; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_d(__m128i_op0,-11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff00000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000001; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffff0001; +- *((int*)& __m128_op0[2]) = 0xffff0001; +- *((int*)& __m128_op0[1]) = 0xffff0001; +- *((int*)& __m128_op0[0]) = 0xffff0001; +- *((int*)& __m128_result[3]) = 0xffff0001; +- *((int*)& __m128_result[2]) = 0xffff0001; +- *((int*)& __m128_result[1]) = 0xffff0001; +- *((int*)& __m128_result[0]) = 0xffff0001; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00fe000100cf005f; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7f00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100010100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x03f0000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x03f0000000000000; +- __m256i_out = __lasx_xvslli_d(__m256i_op0,0x34); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x5f675e96a8d359f5; +- *((unsigned long*)& __m128d_op0[0]) = 0x46387f95d9a68001; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x5d7f5d007f6a007f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_result[3]) = 0xff81ff7dffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_result[1]) = 0xff81ff7dffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff81ff7d; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0x28); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff81ff7dffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_op0[1]) = 0xff81ff7dffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff81ff7d; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f017ffd; +- *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f017ffd; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000007; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xa4a4a4a4a4a4a4a4; +- *((unsigned long*)& __m256i_result[2]) = 0xa4a4a4a4a4a4a4a4; +- *((unsigned long*)& __m256i_result[1]) = 0xa4a4a4a4a4a4a4a4; +- *((unsigned long*)& __m256i_result[0]) = 0xa4a4a4a4a4a4a4a4; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000009; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000012; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000100010100; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00fe0001; +- *((int*)& __m128_op1[2]) = 0x00cf005f; +- *((int*)& __m128_op1[1]) = 0x7fff7fff; +- *((int*)& __m128_op1[0]) = 0x7fff7f00; +- *((int*)& __m128_op2[3]) = 0x5d7f5d00; +- *((int*)& __m128_op2[2]) = 0x7f6a007f; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x5d7f5d00; +- *((int*)& __m128_result[2]) = 0x7f6a007f; +- *((int*)& __m128_result[1]) = 0x7fff7fff; +- *((int*)& __m128_result[0]) = 0x7fff7f00; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000012; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x5d7f5d007f6a007f; +- *((unsigned long*)& __m128d_op1[0]) = 0x7fff7fff7fff7f00; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff000100ff00fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff003000ff00a0; +- *((unsigned long*)& __m128i_result[1]) = 0x0008000f00080008; +- *((unsigned long*)& __m128i_result[0]) = 0x0008000a00080008; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0218ff78fc38fc38; +- *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000048; +- *((unsigned long*)& __m256i_result[1]) = 0x0218ff78fc38fc38; +- *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000048; +- __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_h(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op1[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op1[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x36); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x01fe01fd01fd01fd; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x5d7f5d007f6a007f; +- *((unsigned long*)& __m128i_op2[0]) = 0x7fff7fff7fff7f00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000002ebf; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x31); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00002ebf; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0xffffffff; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; +- *((unsigned long*)& __m128i_result[0]) = 0xc404040404040404; +- __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0006ffff0004ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0002ffff0000ff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0006ffff0004ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002ffff0000ff00; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000000e; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000d; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000e; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x5d7f5d807fea807f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007f008000ea007f; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff8000; +- *((unsigned long*)& __m256i_result[2]) = 0x000043efffff8000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff8000; +- *((unsigned long*)& __m256i_result[0]) = 0x000043efffff8000; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000040400000404; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000040400000404; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x10fbe1e2e0000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x10fbe1e2e0000002; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000c0000005; +- *((unsigned long*)& __m256i_result[2]) = 0x21f8c3c4c0000005; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000c0000005; +- *((unsigned long*)& __m256i_result[0]) = 0x21f8c3c4c0000005; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007f008000ea007f; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xf000000000000000; +- __m128i_out = __lsx_vsat_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000c0000005; +- *((unsigned long*)& __m256i_op0[2]) = 0x21f8c3c4c0000005; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000c0000005; +- *((unsigned long*)& __m256i_op0[0]) = 0x21f8c3c4c0000005; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff8000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000043efffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff8000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000043efffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xbfffa004fffd8000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xbfffa004fffd8000; +- __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000000c0000005; +- *((unsigned long*)& __m256d_op1[2]) = 0x21f8c3c4c0000005; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000000c0000005; +- *((unsigned long*)& __m256d_op1[0]) = 0x21f8c3c4c0000005; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op1[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000009; +- *((unsigned long*)& __m256i_op1[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5d7f5d807fea807f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x5d7f5d807fea807f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xbafebb00ffd500fe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadda_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0218ff78fc38fc38; +- *((unsigned long*)& __m256d_op0[2]) = 0xfc00000000000048; +- *((unsigned long*)& __m256d_op0[1]) = 0x0218ff78fc38fc38; +- *((unsigned long*)& __m256d_op0[0]) = 0xfc00000000000048; +- *((unsigned long*)& __m256i_result[3]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfc00000000000048; +- *((unsigned long*)& __m256i_result[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfc00000000000048; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x43ef8787; +- *((int*)& __m256_op0[4]) = 0x8000ffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x43ef8787; +- *((int*)& __m256_op0[0]) = 0x8000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000001df00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000001df00000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbafebb00ffd500fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xbafebb00ffd500fe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000c0000005; +- *((unsigned long*)& __m256i_op1[2]) = 0x21f8c3c4c0000005; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000c0000005; +- *((unsigned long*)& __m256i_op1[0]) = 0x21f8c3c4c0000005; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000048; +- *((unsigned long*)& __m256i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000048; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xbfffa004fffd8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xbfffa004fffd8000; +- *((unsigned long*)& __m256i_result[3]) = 0x00003f0000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00002fffe8013fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00003f0000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00002fffe8013fff; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x0218ff78; +- *((int*)& __m256_op1[6]) = 0xfc38fc38; +- *((int*)& __m256_op1[5]) = 0xfc000000; +- *((int*)& __m256_op1[4]) = 0x00000048; +- *((int*)& __m256_op1[3]) = 0x0218ff78; +- *((int*)& __m256_op1[2]) = 0xfc38fc38; +- *((int*)& __m256_op1[1]) = 0xfc000000; +- *((int*)& __m256_op1[0]) = 0x00000048; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0xfc38fc38; +- *((int*)& __m256_result[5]) = 0xfc000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0xfc38fc38; +- *((int*)& __m256_result[1]) = 0xfc000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef87878000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef87878000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff0000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffbfffa0ffffff80; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff0000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffbfffa0ffffff80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 
0xbfffa004fffd8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xbfffa004fffd8000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ffff0000ff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ffff0000ff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00ff00ffff0000ff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00ff00ffff0000ff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_d(__m128i_op0,-15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00ff0000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffbfffa0ffffff80; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00ff0000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffbfffa0ffffff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff02000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff02000000; +- __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000feccfecc; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000feccfecc; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xbafebb00; +- *((int*)& __m128_op1[2]) = 0xffd500fe; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op1[2]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op1[0]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fefefe000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fefefe000000; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff02000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff02000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_result[0]) = 0x7e00000000000000; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbafebb00ffd500fe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_op0[2]) = 0x7e00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_op0[0]) = 0x7e00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_result[0]) = 0x7e00000000000000; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff02000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff02000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x010c7fbc; +- *((int*)& __m256_op0[6]) = 0x7e1c7e1c; +- *((int*)& __m256_op0[5]) = 0xfe000000; +- *((int*)& __m256_op0[4]) = 0x00000024; +- *((int*)& __m256_op0[3]) = 0x010c7fbc; +- *((int*)& __m256_op0[2]) = 0x7e1c7e1c; +- *((int*)& __m256_op0[1]) = 0xfe000000; +- *((int*)& __m256_op0[0]) = 0x00000024; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff02000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff02000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_op0[2]) = 0x7e00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_op0[0]) = 0x7e00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_result[2]) = 0x7e00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007e1c7e1c; +- *((unsigned long*)& __m256i_result[0]) = 0x7e00000000000000; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000001c9880; +- *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000001c9880; +- *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- long_op0 = 0x000000007ff00000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007ff00000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007ff00000; +- __m128i_out = __lsx_vreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op0[2]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fc38fc38; +- *((unsigned long*)& __m256i_op0[0]) = 0xfc00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002001800ff0078; +- *((unsigned 
long*)& __m256i_op1[2]) = 0x01f8007001f80070; +- *((unsigned long*)& __m256i_op1[1]) = 0x0002001800ff0078; +- *((unsigned long*)& __m256i_op1[0]) = 0x01f8007001f80070; +- *((unsigned long*)& __m256i_op2[3]) = 0x0218ff78fc38fc38; +- *((unsigned long*)& __m256i_op2[2]) = 0xfc00000000000048; +- *((unsigned long*)& __m256i_op2[1]) = 0x0218ff78fc38fc38; +- *((unsigned long*)& __m256i_op2[0]) = 0xfc00000000000048; +- *((unsigned long*)& __m256i_result[3]) = 0x00300b40fc001678; +- *((unsigned long*)& __m256i_result[2]) = 0xfc00000000001f80; +- *((unsigned long*)& __m256i_result[1]) = 0x00300b40fc001678; +- *((unsigned long*)& __m256i_result[0]) = 0xfc00000000001f80; +- __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x4429146a7b4c88b2; +- *((unsigned long*)& __m128d_op0[0]) = 0xe22b3595efa4aa0c; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000048; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000048; +- long_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000048; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000001c9880; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000001c9880; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffe36780; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffe36780; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000100000001; +- __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010100000101; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010100000101; +- __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffe36780; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffe36780; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_result[2]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_result[0]) = 0x0100000100000001; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000000ffe36780; +- *((unsigned long*)& __m256d_op1[2]) = 0x8000000100000001; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000000ffe36780; +- *((unsigned long*)& __m256d_op1[0]) = 0x8000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffb; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffb; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffb; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffb; +- *((unsigned long*)& __m128i_result[1]) = 0x00000100000000fc; +- *((unsigned long*)& __m128i_result[0]) = 0x00000100000000fc; +- __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x2d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000100000000fc; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000100000000fc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000100000000fc; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000100000000fc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0100000001000000; +- __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffe36780; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffe36780; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000010100000101; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000010100000101; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000010000000000; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000ffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffff0000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010100000101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000010100000101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- int_result = 0x0000000000000002; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; +- __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0x6f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x66); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0404050404040404; +- *((unsigned long*)& __m128i_result[0]) = 0x0404050404040404; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0404050404040404; +- *((unsigned long*)& __m128i_op1[0]) = 0x0404050404040404; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000004040504; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000004040504; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200010002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200010002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000010004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000101; +- *((int*)& __m128_op0[2]) = 0x00000101; +- *((int*)& __m128_op0[1]) = 0x00000101; +- *((int*)& __m128_op0[0]) = 0x00000101; +- *((int*)& __m128_op1[3]) = 0x00000002; +- *((int*)& __m128_op1[2]) = 0x00000002; +- *((int*)& __m128_op1[1]) = 0x00000002; +- *((int*)& __m128_op1[0]) = 0x00000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x9090909090909090; +- *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op0[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op2[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op2[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op2[0]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_result[2]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_result[0]) = 0x0100000100000001; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffdfe01; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffdfe0200000002; +- *((unsigned 
long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4000000000000000; +- __m128i_out = __lsx_vsrlr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x9090909090909090; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000004040504; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000004040504; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000010100000101; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000010100000101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op1[2]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op1[0]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000008050501; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x04040504; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x04040504; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f91; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f91; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010100000101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000010100000101; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& 
__m256_op0[4]) = 0x08050501; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x08050501; +- *((int*)& __m256_op1[7]) = 0x90909090; +- *((int*)& __m256_op1[6]) = 0x90909090; +- *((int*)& __m256_op1[5]) = 0x90909090; +- *((int*)& __m256_op1[4]) = 0x90909090; +- *((int*)& __m256_op1[3]) = 0x90909090; +- *((int*)& __m256_op1[2]) = 0x90909090; +- *((int*)& __m256_op1[1]) = 0x90909090; +- *((int*)& __m256_op1[0]) = 0x90909090; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000008050501; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0x3d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; +- __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000008050501; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000008050501; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x01000000; +- *((int*)& __m128_op0[0]) = 0x01000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op0[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[0]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_result[3]) = 0x6f6f6f6f6f6f6f6f; +- *((unsigned long*)& __m256i_result[2]) = 0x6f6f6f6f6f6f6f6f; +- *((unsigned long*)& __m256i_result[1]) = 0x6f6f6f6f6f6f6f6f; +- *((unsigned long*)& __m256i_result[0]) = 0x6f6f6f6f6f6f6f6f; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x00ffff00ff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffff00ff00ff00; +- __m128i_out = __lsx_vldi(-1686); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ffff00ff00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ffff00ff00ff00; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00ffff00; +- *((int*)& __m128_op0[2]) = 0xff00ff00; +- *((int*)& __m128_op0[1]) = 0x00ffff00; +- *((int*)& __m128_op0[0]) = 0xff00ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; +- __m128i_out = 
__lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000008050501; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op0[2]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op0[1]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op0[0]) = 0x9090909090909090; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_result[2]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_result[1]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_result[0]) = 0x8848c848c848c848; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000f91; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000f91; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080000000; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff00ff00; +- *((int*)& __m128_op0[2]) = 0xff00ff00; +- *((int*)& __m128_op0[1]) = 0xff00ff00; +- *((int*)& __m128_op0[0]) = 0xff00ff00; +- *((int*)& __m128_result[3]) = 0x7fc00000; +- *((int*)& __m128_result[2]) = 0x7fc00000; +- *((int*)& __m128_result[1]) = 0x7fc00000; +- *((int*)& __m128_result[0]) = 0x7fc00000; +- __m128_out = __lsx_vfsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = 
__lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000022; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfe813f00fe813f00; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe813f00fe813f00; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfe813f00fe813f00; +- __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe813f00fe813f00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000033; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe813f00fe813f00; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe813f00fe813f00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff800000000000; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000f91; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000f91; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f90; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f90; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ff000000ff00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff000000ff00; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xff00ff00ff00ff00; +- *((int*)& __m128_result[3]) = 0xffe00000; +- *((int*)& __m128_result[2]) = 0xffe00000; +- *((int*)& __m128_result[1]) = 0xffe00000; +- *((int*)& __m128_result[0]) = 0xffe00000; +- __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op1[1]) = 0xfe813f00fe813f00; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe813f00fe813f00; +- *((unsigned long*)& __m128i_result[1]) = 0xffff017fffff017f; +- *((unsigned long*)& __m128i_result[0]) = 0xffff017fffff017f; +- __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff9f017f1fa0b199; +- *((unsigned long*)& __m128i_op0[0]) = 0x1197817fd839ea3e; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000033; +- *((unsigned long*)& __m128i_result[1]) = 0xff011fb11181d8ea; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_result[3]) = 0xc800c800c800c800; +- *((unsigned long*)& __m256i_result[2]) = 0x8800c800c800c801; +- *((unsigned long*)& __m256i_result[1]) = 0xc800c800c800c800; +- *((unsigned long*)& __m256i_result[0]) = 0x8800c800c800c801; +- __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000f90; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff70; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff70; +- __m256i_out = 
__lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003e; +- *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe000200fe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe000200fe; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000003e; +- *((unsigned long*)& __m128i_result[0]) = 0xfefe02fefefe02fe; +- __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00fe00fe; +- *((int*)& __m128_op0[2]) = 0x000200fe; +- *((int*)& __m128_op0[1]) = 0x00fe00fe; +- *((int*)& __m128_op0[0]) = 0x000200fe; +- *((int*)& __m128_result[3]) = 0xc2fc0000; +- *((int*)& __m128_result[2]) = 0xc3040000; +- *((int*)& __m128_result[1]) = 0xc2fc0000; +- *((int*)& __m128_result[0]) = 0xc3040000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000ff70; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000ff70; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000100; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000100; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff011fb11181d8ea; +- *((unsigned long*)& __m128i_op0[0]) = 0x80ff800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00fe00fe000200fe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe000200fe; +- *((unsigned long*)& __m128i_result[1]) = 0x00fd02fe00002302; +- *((unsigned long*)& __m128i_result[0]) = 0x007ffd0200000000; +- __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xc800c800c800c800; +- *((unsigned long*)& __m256i_op1[2]) = 0x8800c800c800c801; +- *((unsigned long*)& __m256i_op1[1]) = 0xc800c800c800c800; +- *((unsigned long*)& __m256i_op1[0]) = 0x8800c800c800c801; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xc800c800c800c800; +- *((unsigned long*)& __m256i_op1[2]) = 0x8800c800c800c801; +- *((unsigned long*)& __m256i_op1[1]) = 0xc800c800c800c800; +- *((unsigned long*)& __m256i_op1[0]) = 0x8800c800c800c801; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffcc9a989a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000000010000; +- *((unsigned long*)& __m128i_result[1]) = 0x00003fff00003fff; +- *((unsigned long*)& __m128i_result[0]) = 0x00003fff00003fff; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc2fc0000c3040000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc2fc0000c3040000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff37b737b8; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff77b737b8; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff37b737b8; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff77b737b8; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0000000000000f90; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000f90; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000f90; +- __m256i_out = __lasx_xvsadd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xc2fc0000; +- *((int*)& __m128_op1[2]) = 0xc3040000; +- *((int*)& __m128_op1[1]) = 0xc2fc0000; +- *((int*)& __m128_op1[0]) = 0xc3040000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc800c800c800c800; +- *((unsigned long*)& __m256i_op0[2]) = 0x8800c800c800c801; +- *((unsigned long*)& __m256i_op0[1]) = 0xc800c800c800c800; +- *((unsigned long*)& __m256i_op0[0]) = 0x8800c800c800c801; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00fe00fe; +- *((int*)& __m128_op0[2]) = 0x000200fe; +- *((int*)& __m128_op0[1]) = 0x00fe00fe; +- *((int*)& __m128_op0[0]) = 0x000200fe; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op0[2]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_op0[1]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op0[0]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; 
+- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000f90; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000f90; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffefffe00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffefffe00000000; +- __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00003fff00003fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00003fff00003fff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000208000002080; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_result[0]) = 0x1000100010001000; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1000100010001000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1000100010001000; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_result[1]) = 0x2080208020802080; +- *((unsigned long*)& __m128i_result[0]) = 0x2080208020802080; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128i_op0[0]) = 0xa352bfac9269e0aa; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128i_result[0]) = 0xa352bfac9269e0aa; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128d_op0[0]) = 0xa352bfac9269e0aa; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000208000002080; +- __m128i_out = 
__lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x000007c8; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x000007c8; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa; +- *((unsigned long*)& __m128i_result[1]) = 0xffffd70b00006ea9; +- *((unsigned long*)& __m128i_result[0]) = 0xffffa352ffff9269; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000208000002080; +- *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa; +- *((unsigned long*)& __m128i_result[1]) = 0xffffd70b00006ea9; +- *((unsigned long*)& __m128i_result[0]) = 0xffffa352ffff9269; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffd70b00006ea9; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffa352ffff9269; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffd70b00006ea9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffa352ffff9269; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff0001; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op1[2]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_op1[1]) = 0xc848c848c848c848; +- *((unsigned long*)& __m256i_op1[0]) = 0x8848c848c848c848; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000007c8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000007c8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000007c8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000007c8; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fe01fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fe01fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000007c8; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000007c8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe0000ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe0000ff01; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000003fbfc04; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001fdfe02; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000003fbfc04; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001fdfe02; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000fd; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000000007c8; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000000007c8; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000001fe01fe; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000ff0100; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000001fe01fe; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000ff0100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xbba0c07b51230d5c; +- *((unsigned long*)& __m128d_op0[0]) = 0xa15f3f9e8763c2b9; +- *((unsigned long*)& __m128d_op1[1]) = 0xbba0c07b51230d5c; +- *((unsigned long*)& __m128d_op1[0]) = 0xa15f3f9e8763c2b9; +- *((int*)& __m128_result[3]) = 0x9d0603db; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x9d0603db; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128i_op0[0]) = 0xa352bfac9269e0aa; +- *((int*)& __m128_result[3]) = 0xce23d33d; +- *((int*)& __m128_result[2]) = 0x4edd53ea; +- *((int*)& __m128_result[1]) = 0xceb95a81; +- *((int*)& __m128_result[0]) = 0xcedb2c3f; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128i_op1[0]) = 0xa352bfac9269e0aa; +- *((unsigned long*)& __m128i_result[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128i_result[0]) = 0xa352bfac9269e0aa; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000007c8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000007c8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001fe01fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fe01fe; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000ff0100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c8; +- __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000fd; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[2]) = 0x00000004000000fd; +- *((unsigned long*)& __m256i_result[1]) = 0x00000004000000fe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000003fbfc04; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001fdfe02; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000003fbfc04; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001fdfe02; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_d(__m256i_op0,13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xce23d33e43d9736c; +- *((unsigned long*)& __m128i_op0[0]) = 0x63b2ac27aa076aeb; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x31dc2cc1bc268c93; +- *((unsigned long*)& __m128i_result[0]) = 0x9c4d53d855f89514; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xce23d33e43d9736c; +- *((unsigned long*)& __m128i_op1[0]) = 0x63b2ac27aa076aeb; +- *((unsigned long*)& __m128i_result[1]) = 0x63b2ac27aa076aeb; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xc8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x31dc2cc1bc268c93; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c4d53d855f89514; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff; +- __m128i_out = __lsx_vslei_h(__m128i_op0,13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000020006; +- *((unsigned long*)& __m128i_result[1]) = 0x0000060000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffff0000; +- *((int*)& __m128_op0[0]) = 0x0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; +- __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x3e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x63b2ac27aa076aeb; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000063b2ac27; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffaa076aeb; +- __m128i_out = __lsx_vexth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; +- __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000063b2ac27; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffaa076aeb; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000063b2ac27; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffaa076aeb; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff63b3584e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fffdaa07d5d6; +- __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c8; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000600; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000ac26; +- *((unsigned long*)& __m128i_op0[0]) = 
0xffffffff80000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000003000000d613; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c0000000; +- __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000fd; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000062d4; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000062d4; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000006338; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xd70b30c96ea9f4e8; +- *((unsigned long*)& __m128d_op0[0]) = 0xa352bfac9269e0aa; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000003000000d613; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c0000000; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000003000000d613; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000003000000d612; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000bfffffff; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000c9; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,-15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff0fffffff00001; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xfff0fffffff09515; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000100010000ffda; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000016; +- __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000003000000d612; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000bfffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000500000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000060000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0600000100000001; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; +- __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffff0100ff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffeffff; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000060000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000500000000; +- *((unsigned long*)& __m128i_op2[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000060000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000fffe00006aea; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256d_op1[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256d_op1[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256d_op1[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffbfffefffc9510; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffbfffefffc9510; +- *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; +- *((unsigned long*)& __m128i_op2[1]) = 0xfffbfffefffc9510; +- *((unsigned long*)& __m128i_op2[0]) = 0xfffbfffefffc9510; +- *((unsigned 
long*)& __m128i_result[1]) = 0x29c251319c3a5c90; +- *((unsigned long*)& __m128i_result[0]) = 0x62fb9272df7da6b0; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op1[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff0100ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0607060700000807; +- *((unsigned long*)& __m128i_op1[0]) = 0x0707f8f803e8157e; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = 
__lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x31); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x000000f0; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x000000f0; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x000000f0; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x000000f0; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000c9; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x01010101010101c9; +- __m256i_out = __lasx_xvbitset_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff88; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe98; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; +- __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffe98; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe98; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x06070607; +- *((int*)& __m128_op0[2]) = 0x00000807; +- *((int*)& __m128_op0[1]) = 0x0707f8f8; +- *((int*)& __m128_op0[0]) = 0x03e8157e; +- *((int*)& __m128_result[3]) = 0x5c303f97; +- *((int*)& __m128_result[2]) = 0x61ff9049; +- *((int*)& __m128_result[1]) = 0x5bafa1dd; +- *((int*)& __m128_result[0]) = 0x5d3e1e1d; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffff53d9; +- *((int*)& __m128_op0[1]) = 0xffff0001; +- *((int*)& __m128_op0[0]) = 0xffff9515; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& 
__m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9514; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xbfbfbfbfbfbfbfbf; +- *((unsigned long*)& __m128i_result[0]) = 0xbfbfbfbfbfbfbfbf; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_result[0]) = 0xff000001ffff9515; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0x67); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffe98; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; +- 
__m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007fffa9ed; +- *((unsigned long*)& __m128i_result[0]) = 0x7f8000017fffca8b; +- __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; +- *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9514; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ac26; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000001; +- __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000003f; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; +- __m256i_out = __lasx_xvreplve0_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff00000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x2c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x01010101010101c9; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000781; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[3]) = 0x0008080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0008080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000003c; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x45); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000027; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000027; +- __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0x0000ac26; +- *((int*)& __m128_op0[1]) = 0x00ff0000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrmh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0c0b0a090b0a0908; +- *((unsigned long*)& __m128i_op0[0]) = 0x0a09080709080706; +- *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; +- *((unsigned long*)& __m128i_result[1]) = 0x0c0b0a090b0a0908; +- *((unsigned long*)& __m128i_result[0]) = 0x0a09080709080706; +- __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff0000ac26; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0c0b0a09; +- *((int*)& __m128_op0[2]) = 0x0b0a0908; +- *((int*)& __m128_op0[1]) = 0x0a090807; +- *((int*)& __m128_op0[0]) = 0x09080706; +- *((int*)& __m128_op1[3]) = 0x0c0b0a09; +- *((int*)& __m128_op1[2]) = 0x0b0a0908; +- *((int*)& __m128_op1[1]) = 0x0a090807; +- *((int*)& __m128_op1[0]) = 0x09080706; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) 
= 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000781; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x0c0b0a09; +- *((int*)& __m128_op0[2]) = 0x0b0a0908; +- *((int*)& __m128_op0[1]) = 0x0a090807; +- *((int*)& __m128_op0[0]) = 0x09080706; +- *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; +- __m128i_out = __lsx_vfclass_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fffe00006aea; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffce; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000010002; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff960015; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000010002; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffff960015; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 
0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x000000ff; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x000000ff; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000007fffa9ed; +- *((unsigned long*)& __m128d_op0[0]) = 0x7f8000017fffca8b; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0c0b0a090b0a0908; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a09080709080706; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00010002; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xff960015; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffd60015; +- __m128i_out = __lsx_vfrintrm_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0xffffffffffd60015; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x80808080806b000b; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000001fe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff0000ac26; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff80005613; +- *((unsigned long*)& __m128i_result[0]) = 0x007f800000000000; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_w(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128d_op0[0]) = 0x000000000000ffce; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80005613; +- *((unsigned long*)& __m128i_op1[0]) = 0x007f800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000807f80808000; +- *((unsigned long*)& __m128i_result[0]) = 0x80006b0000000b00; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000807f80808000; +- *((unsigned long*)& __m128i_op0[0]) = 0x80006b0000000b00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x8000807f00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x80006b0080808080; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000807f00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x80006b0080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff7fff; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000078100000064; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000002b0995850; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff80005613; +- *((unsigned long*)& __m128i_op1[0]) = 0x007f800000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffff80005613; +- *((unsigned long*)& __m128i_op2[0]) = 0x007f800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff00011cf0c569; +- *((unsigned long*)& __m128i_result[0]) = 0xc0000002b0995850; +- __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000781; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000781; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000064; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000064; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000781; +- *((int*)& __m256_op0[0]) = 0x00000064; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000ffce; +- *((int*)& __m128_op1[3]) = 0xffff0001; +- *((int*)& __m128_op1[2]) = 0x1cf0c569; +- *((int*)& __m128_op1[1]) = 0xc0000002; +- *((int*)& __m128_op1[0]) = 0xb0995850; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- long_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000064; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000781; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000064; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613; +- *((unsigned long*)& __m128i_op0[0]) = 0x007f800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff80005613; +- *((unsigned long*)& __m128i_result[0]) = 0x81000080806b000b; +- __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000807f00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x80006b0080808080; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569; +- *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffe30f3a97; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffcfe72830; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613; +- *((unsigned long*)& __m128i_op0[0]) = 0x81000080806b000b; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569; +- *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff9cf0d77b; +- *((unsigned long*)& __m128i_result[0]) = 0xc1000082b0fb585b; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x4000000000000000; +- __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080808000; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000080808000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080808000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x8b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffff00011cf0c569; +- *((unsigned long*)& __m128d_op0[0]) = 0xc0000002b0995850; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x22); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff9cf0d77b; +- *((unsigned long*)& __m128i_op1[0]) = 0xc1000082b0fb585b; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,-5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000080808000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000080808000; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000032; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000003c000000032; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000004e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000032; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000003c000000032; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x001000100010000a; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x001000060010000a; +- __m256i_out = __lasx_xvclz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 
0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000080808000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_result[0]) = 0x0080006b0000000b; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000004e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffefffe; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000004e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffbfffb; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xf4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vpcnt_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001; +- __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvsll_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00800080; +- *((int*)& __m128_op0[2]) = 0x00800080; +- *((int*)& __m128_op0[1]) = 0x0080006b; +- *((int*)& __m128_op0[0]) = 0x0000000b; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x80808080; +- *((int*)& __m128_op1[0]) = 0x806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x2f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x80808080806b000b; +- __m128i_out = __lsx_vsadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((int*)& __m256_op1[7]) = 0x7ff00000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x7ff00000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x7ff00000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x7ff00000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x7ff00000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x7ff00000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x7ff00000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x7ff00000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0080006b0000000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000001ff1745745c; +- __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x80808080806b000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000c0c0c000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = 
__lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0080006b0000000b; +- *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xa1a1a1a1a1a1a1a1; +- *((unsigned long*)& __m256i_result[2]) = 0xa1a1a1a15e5e5e5e; +- *((unsigned long*)& __m256i_result[1]) = 0xa1a1a1a1a1a1a1a1; +- *((unsigned long*)& __m256i_result[0]) = 0xa1a1a1a15e5e5e5e; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; +- *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; +- *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; +- *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_result[2]) = 
0x0000000000000001; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000001; +- __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000800080; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x13121110; +- *((int*)& __m128_op0[2]) = 0x1211100f; +- *((int*)& __m128_op0[1]) = 0x11100f0e; +- *((int*)& __m128_op0[0]) = 0x100f0e0d; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x131211101211100f; +- *((unsigned long*)& __m128d_op0[0]) = 0x11100f0e100f0e0d; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; +- *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; +- *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; +- *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xa1a1a1a1a1a15e5e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xa1a1a1a1a1a15e5e; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xc0c0c000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00800080; +- *((int*)& __m128_op1[2]) = 0x00800080; +- *((int*)& __m128_op1[1]) = 0x0080006b; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00800080; +- *((int*)& __m128_result[2]) = 0xc0c0c000; +- *((int*)& __m128_result[1]) = 0x0080006b; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000040002; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00fe01e000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00fe01e000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a1a1a15e5e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a1a1a15e5e; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800080; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0080006b00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff80000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff80000; +- __m256i_out = __lasx_xvslli_d(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000400000004000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00004000ffffffff; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0080006b00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x001b19b1c9c6da5a; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x001b19b1c9c6da5a; +- *((unsigned long*)& __m128i_result[1]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_result[0]) = 0x008003496dea0c61; +- __m128i_out = __lsx_vmaddwod_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x131211101211100f; +- *((unsigned long*)& __m128i_op0[0]) = 0x11100f0e100f0e0d; 
+- *((unsigned long*)& __m128i_result[1]) = 0x13101213120f1112; +- *((unsigned long*)& __m128i_result[0]) = 0x110e1011100d0f10; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xcb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; +- *((unsigned long*)& __m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; +- *((unsigned long*)& __m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; +- *((unsigned long*)& __m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; +- *((unsigned long*)& __m256i_result[3]) = 0xa1bfa1bfa1bfa1bf; +- *((unsigned long*)& __m256i_result[2]) = 0xa1bfa1bf5e7c5e7c; +- *((unsigned long*)& __m256i_result[1]) = 0xa1bfa1bfa1bfa1bf; +- *((unsigned long*)& __m256i_result[0]) = 0xa1bfa1bf5e7c5e7c; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x007d003e007d003e; +- *((unsigned long*)& __m256i_result[2]) = 0x007d003effa80010; +- *((unsigned long*)& __m256i_result[1]) = 0x007d003e007d003e; +- *((unsigned long*)& __m256i_result[0]) = 0x007d003effa80010; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_op1[0]) = 0x008003496dea0c61; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00004000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0xf7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x007d003e007d003e; +- *((unsigned long*)& __m256i_op1[2]) = 0x007d003effa80010; +- *((unsigned long*)& __m256i_op1[1]) = 0x007d003e007d003e; +- *((unsigned long*)& __m256i_op1[0]) = 0x007d003effa80010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000f0000000f; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_d(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0080008000800080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001300000013; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff457db03f; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff457db03f; +- __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000457d; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000b03f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000457d; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000b03f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x3b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x31); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[2]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op0[1]) = 0x457db03e457db03e; +- *((unsigned long*)& __m256i_op0[0]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000f000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000f000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0008b03e457db03e; +- *((unsigned long*)& __m256i_result[2]) = 0x457db03e45a87310; +- *((unsigned long*)& __m256i_result[1]) = 0x0008b03e457db03e; +- *((unsigned long*)& __m256i_result[0]) = 
0x457db03e45a87310; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) 
= 0x000f000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000f000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffba8300004fc2; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000f0000000f; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000457d607d; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff457d607f; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000457d607d; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff457d607f; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000457d607d; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457d607f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000457d607d; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457d607f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffa2beb040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffa2beb040; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000457d607d; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457d607f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000457d607d; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457d607f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffa2beb040; +- __m256i_out = __lasx_xvavgr_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000457db03e; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff457db03f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00020001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00020001; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x000f000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000f000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256d_result[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256d_result[0]) = 0x7fffffffa2beb040; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000f000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000f000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000f000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000f000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfff1000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfff1000000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000020002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_result[3]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_result[2]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_result[1]) = 0xffffba8300004fc2; +- *((unsigned long*)& __m256i_result[0]) = 0xffffba8300004fc2; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001000100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001000100; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xfff1000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xfff1000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_op1[2]) = 0x1b1a191817161514; +- *((unsigned long*)& __m256i_op1[1]) = 0x232221201f1e1d1c; +- *((unsigned long*)& __m256i_op1[0]) = 0x1b1a191817161514; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000000f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x000000000000000f; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256d_result[3]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256d_result[2]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256d_result[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256d_result[0]) = 0xc1d75053f0000000; +- __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x000100010000fffb; +- *((unsigned long*)& __m128i_result[0]) = 0x000100010000fffb; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; +- *((unsigned long*)& __m128i_result[0]) = 0x0303030303030304; +- __m128i_out = __lsx_vaddi_bu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128d_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_result[3]) = 0x004100df00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00c000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x004100df00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00c000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- 
+- *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000022be22be; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fffa2bea2be; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000022be22be; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fffa2bea2be; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x004100df00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00c000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x004100df00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00c000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_result[3]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_result[2]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_result[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_result[0]) = 0xc1d75053f0000000; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010000; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00010001; +- *((int*)& __m128_op0[2]) = 0x00010001; +- *((int*)& __m128_op0[1]) = 0x00010001; +- *((int*)& __m128_op0[0]) = 0x00010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000022be22be; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fffa2bea2be; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000022be22be; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fffa2bea2be; +- *((unsigned long*)& __m256i_result[3]) = 0xffe1ffe1229f229f; +- *((unsigned long*)& __m256i_result[2]) = 0x7fe07fe0a29fa29f; +- *((unsigned long*)& __m256i_result[1]) = 0xffe1ffe1229f229f; +- *((unsigned long*)& __m256i_result[0]) = 0x7fe07fe0a29fa29f; +- __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000400000000; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffa30000165a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000104000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffa30000165a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000104000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_result[3]) = 0xbe21000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000505300000000; +- *((unsigned long*)& __m256i_result[1]) = 0xbe21000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000505300000000; +- __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffa30000165a; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000104000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffa30000165a; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000104000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256d_op1[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc1d75053f0000000; +- *((int*)& __m256_result[7]) = 0xc03ae000; +- *((int*)& __m256_result[6]) = 0x420a6000; +- *((int*)& __m256_result[5]) = 0xc6000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0xc03ae000; +- *((int*)& __m256_result[2]) = 0x420a6000; +- *((int*)& __m256_result[1]) = 0xc6000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_du_q(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0xbe21000100000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000505300000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xbe21000100000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000505300000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op2[2]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x41dfffffffc00000; +- *((unsigned long*)& __m256i_op2[0]) = 0xc1d75053f0000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00005053000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00005053000000ff; +- __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffa30000165a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000104000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffa30000165a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000104000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000165a; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x40b2bf4d; +- *((int*)& __m256_op0[6]) = 0x30313031; +- *((int*)& __m256_op0[5]) = 0x50005000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x40b2bf4d; +- *((int*)& __m256_op0[2]) = 0x30313031; +- *((int*)& __m256_op0[1]) = 0x50005000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x22be22be; +- *((int*)& __m256_op1[5]) = 0x7fff7fff; +- *((int*)& __m256_op1[4]) = 0xa2bea2be; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x22be22be; +- 
*((int*)& __m256_op1[1]) = 0x7fff7fff; +- *((int*)& __m256_op1[0]) = 0xa2bea2be; +- *((int*)& __m256_result[7]) = 0x40b2bf4d; +- *((int*)& __m256_result[6]) = 0x30313031; +- *((int*)& __m256_result[5]) = 0x7fff7fff; +- *((int*)& __m256_result[4]) = 0xa2bea2be; +- *((int*)& __m256_result[3]) = 0x40b2bf4d; +- *((int*)& __m256_result[2]) = 0x30313031; +- *((int*)& __m256_result[1]) = 0x7fff7fff; +- *((int*)& __m256_result[0]) = 0xa2bea2be; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0xfffefffe; +- *((int*)& __m128_op0[2]) = 0xfffeffff; +- *((int*)& __m128_op0[1]) = 0xfffefffe; +- *((int*)& __m128_op0[0]) = 0xfffeffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffefffefffeffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffefffefffeffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00005053000000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00005053000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00010001; +- *((int*)& __m128_op1[2]) = 0x00010001; +- *((int*)& __m128_op1[1]) = 0x00010001; +- *((int*)& __m128_op1[0]) = 0x00010001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 
0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffe0000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x40b2bf4d30313031; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fffa2bea2be; +- *((unsigned long*)& __m256i_op0[1]) = 0x40b2bf4d30313031; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fffa2bea2be; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x40b240b330313031; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff5d425d42; +- *((unsigned long*)& __m256i_result[1]) = 0x40b240b330313031; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff5d425d42; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_op1[3]) = 0x40b240b330313031; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff5d425d42; +- *((unsigned long*)& __m256i_op1[1]) = 0x40b240b330313031; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff5d425d42; +- *((unsigned long*)& __m256i_result[3]) = 0x000040b200002fd4; +- *((unsigned long*)& __m256i_result[2]) = 0x00007fff0000739c; +- *((unsigned long*)& __m256i_result[1]) = 0x000040b200002fd4; +- 
*((unsigned long*)& __m256i_result[0]) = 0x00007fff0000739c; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000400000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_result[2]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_result[0]) = 0xc600000000000000; +- __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000040b200002fd4; +- *((unsigned long*)& __m256i_op1[2]) = 0x00007fff0000739c; +- *((unsigned long*)& __m256i_op1[1]) = 0x000040b200002fd4; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007fff0000739c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000739c; 
+- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000013ec13e; +- *((unsigned long*)& __m128d_op1[0]) = 0xc03fc03fc0ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000040b200002fd4; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007fff0000739c; +- *((unsigned long*)& __m256i_op0[1]) = 0x000040b200002fd4; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007fff0000739c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; +- __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffe0000000; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0xff00ff00ffff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffc03fffffffc0; +- *((unsigned long*)& __m256i_result[2]) = 0xffffc00000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffc03fffffffc0; +- *((unsigned long*)& __m256i_result[0]) = 0xffffc00000000000; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000004; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xe0000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x013ec13e; +- *((int*)& __m128_op0[1]) = 
0xc03fc03f; +- *((int*)& __m128_op0[0]) = 0xc0ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffdfffffff8; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000008000165a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000008000165a; +- *((unsigned long*)& __m256i_result[3]) = 0xffff00017fff005d; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fffe9a6; +- *((unsigned long*)& __m256i_result[1]) = 0xffff00017fff005d; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fffe9a6; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000011f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000011f; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000192540; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000192540; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffa3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000165a; +- *((unsigned long*)& __m256i_result[3]) = 0x1818ffff1818ffa3; +- *((unsigned long*)& __m256i_result[2]) = 0x181818181818185a; +- *((unsigned long*)& __m256i_result[1]) = 0x1818ffff1818ffa3; +- *((unsigned long*)& __m256i_result[0]) = 0x181818181818185a; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffdfffffff8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7ffffffc; +- __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffc03b1fc5e050; +- *((unsigned long*)& __m256d_op0[2]) = 0x6a9e3fa2603a2000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffc03b1fc5e050; +- *((unsigned long*)& __m256d_op0[0]) = 0x6a9e3fa2603a2000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636389038903; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636389038903; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000001ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000001ffff; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fe70000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fe70000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007fe70000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fe70000; +- *((unsigned long*)& __m256i_result[3]) = 0x00007f7f80007fa3; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007f670000; +- *((unsigned long*)& __m256i_result[1]) = 0x00007f7f80007fa3; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007f670000; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc03fffffffc0; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffc00000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffc03fffffffc0; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffc00000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_result[2]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_result[0]) = 0xc600000000000000; +- __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fe70000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fe70000; +- *((unsigned long*)& __m256i_op1[3]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffff; +- __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff; +- __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000008000165a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff8000ffa3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000008000165a; +- *((unsigned long*)& __m256i_result[3]) = 0x0009000900090009; +- *((unsigned long*)& __m256i_result[2]) = 0x000900090009165a; +- *((unsigned long*)& __m256i_result[1]) = 0x0009000900090009; +- *((unsigned long*)& __m256i_result[0]) = 0x000900090009165a; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc03ae000ffff6000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00007f7f80007fa3; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007f670000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00007f7f80007fa3; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007f670000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffe7fffffff; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7ffffffb; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe7fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000001fd02; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffe1fffffff; +- __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffc03b1fc5e050; +- *((unsigned long*)& __m256i_op0[2]) = 0x6a9e3fa2603a2000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffc03b1fc5e050; +- *((unsigned long*)& __m256i_op0[0]) = 0x6a9e3fa2603a2000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffc03fffffffc0; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffc00000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffc03fffffffc0; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffc00000000000; +- *((unsigned 
long*)& __m256i_result[3]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_result[2]) = 0x019d00a2003a0000; +- *((unsigned long*)& __m256i_result[1]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_result[0]) = 0x019d00a2003a0000; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffe1fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffb; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000080008; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000077fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x000000ff; +- *((int*)& __m128_op1[0]) = 0xfe01fd02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080008; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000040002; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_op0[2]) = 0x019d00a2003a0000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_op0[0]) = 0x019d00a2003a0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000077fff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_result[2]) = 0x019d00a20039fff9; +- *((unsigned long*)& __m256i_result[1]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_result[0]) = 0x019d00a2003a0000; +- __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7ffffffb; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000040002; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000080008; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_result[3]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xc1f0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xc1f0000000000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x000000ff; +- *((int*)& __m128_op0[0]) = 0xfe01fd02; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x0001fe01; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ff02ff80fede; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x0000ff02ff80fede; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fffe00800022; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fffe00800022; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000001fe01; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000001fe01; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_result[0]) = 0x0f0f0f0f00000000; +- __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000003ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x6a9e3f9a; +- *((int*)& __m256_op0[4]) = 0x603a2001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x6a9e3f9a; +- *((int*)& __m256_op0[0]) = 0x603a2001; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000900000009; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff7fffffff7f; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000fffe00800022; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffe00800022; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00007fff00400011; +- *((unsigned long*)& __m256i_result[2]) = 0x000000008001ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00007fff00400011; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000307; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x00ff80ff00ff80ff; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000900000009; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x01fe007a; +- *((int*)& __m256_op0[6]) = 0x01c40110; +- *((int*)& __m256_op0[5]) = 0x019d00a2; +- *((int*)& __m256_op0[4]) = 0x0039fff9; +- *((int*)& __m256_op0[3]) = 0x01fe007a; +- *((int*)& __m256_op0[2]) = 0x01c40110; +- *((int*)& __m256_op0[1]) = 0x019d00a2; +- *((int*)& __m256_op0[0]) = 0x003a0000; +- *((int*)& __m256_op1[7]) = 0x0000fffe; +- *((int*)& __m256_op1[6]) = 0x00800022; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x0000fffe; +- *((int*)& __m256_op1[2]) = 0x00800022; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out 
= __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffff7fffffff7f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0f0f0f0f00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0f07697100000000; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff80ff00ff80ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ffffff81fe; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffff00ffff7e01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000fe86; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0f0f0f0f00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000fffe01fd02; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ffffffff00; +- __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000fffe00800022; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000fffe00800022; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m128i_op1[1]) = 0x000000ffffff81fe; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffff7e01; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x000000fffe01fd02; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00fe00fffe86f901; +- __m128i_out = __lsx_vmaddwev_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000100; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000100; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001000000ff; +- __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvsubwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x000000ff; +- *((int*)& __m128_op0[0]) = 0xfe01fd02; +- *((int*)& __m128_op1[3]) = 0x00000001; +- *((int*)& __m128_op1[2]) = 0x00000100; +- *((int*)& __m128_op1[1]) = 0x00000001; +- *((int*)& __m128_op1[0]) = 0x00000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000077fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff80ff00ff80ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; +- __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000077fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000007ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0003ffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000007ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_op0[2]) = 0x019d00a20039fff9; +- *((unsigned long*)& __m256i_op0[1]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_op0[0]) = 0x019d00a2003a0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x01fe007a01c40110; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x01fe007a01c40110; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000003ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000077fff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256i_result[2]) = 0x8d8d72728d8d8d8d; +- *((unsigned long*)& __m256i_result[1]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256i_result[0]) = 0x8d8d72728d8d8d8d; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0x8d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256i_op1[2]) = 0x8d8d72728d8d8d8d; +- *((unsigned long*)& __m256i_op1[1]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256i_op1[0]) = 0x8d8d72728d8d8d8d; +- *((unsigned long*)& __m256i_result[3]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256i_result[2]) = 0x8d8d72728d8d8d8d; +- *((unsigned long*)& __m256i_result[1]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256i_result[0]) = 0x8d8d72728d8d8d8d; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000003ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256i_op0[2]) = 0x8d8d72728d8d8d8d; +- *((unsigned long*)& __m256i_op0[1]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256i_op0[0]) = 0x8d8d72728d8d8d8d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = 
__lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0003ffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0f07697100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000076971000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x7f800000; +- *((int*)& __m128_op0[2]) = 0x7f800000; +- *((int*)& __m128_op0[1]) = 0x7f800000; +- *((int*)& __m128_op0[0]) = 0x7f800000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x21); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000040000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000040000000; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000040000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000040000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x3fc000005fc00000; +- *((unsigned long*)& __m128i_result[0]) = 0x3fc000005fc00000; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7fc00000; +- *((int*)& __m128_result[2]) = 0x7fc00000; +- *((int*)& __m128_result[1]) = 0x7fc00000; +- *((int*)& __m128_result[0]) = 0x7fc00000; +- __m128_out = __lsx_vfdiv_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256d_op0[2]) = 0x8d8d72728d8d8d8d; +- *((unsigned long*)& __m256d_op0[1]) = 0x8d8d72728d8d7272; +- *((unsigned long*)& __m256d_op0[0]) = 0x8d8d72728d8d8d8d; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvabsd_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x01fe007a; +- *((int*)& __m256_op1[6]) = 0x01c40110; +- *((int*)& __m256_op1[5]) = 0x019d00a2; +- *((int*)& __m256_op1[4]) = 0x0039fff9; +- *((int*)& __m256_op1[3]) = 0x01fe007a; +- *((int*)& __m256_op1[2]) = 0x01c40110; +- *((int*)& __m256_op1[1]) = 0x019d00a2; +- *((int*)& __m256_op1[0]) = 0x003a0000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = 
__lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xff800000; +- *((int*)& __m256_result[6]) = 0xff800000; +- *((int*)& __m256_result[5]) = 0xff800000; +- *((int*)& __m256_result[4]) = 0xff800000; +- *((int*)& __m256_result[3]) = 0xff800000; +- *((int*)& __m256_result[2]) = 0xff800000; +- *((int*)& __m256_result[1]) = 0xff800000; +- *((int*)& __m256_result[0]) = 0xff800000; +- __m256_out = __lasx_xvflogb_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xc0008000c0008000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xc0008000c0008000; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800f800; +- *((unsigned long*)& 
__m256i_result[2]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800f800; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_op1[2]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_op1[1]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_op1[0]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffff8000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffff8000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffff8000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffff8000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc0008000c0008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc0008000c0008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& 
__m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x80000000; +- *((int*)& __m256_op1[4]) = 0x80000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x80000000; +- *((int*)& __m256_op1[0]) = 0x80000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000000000000b; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc0008000c0008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc0008000c0008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc0008000c0008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc0008000c0008000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x8001000180010000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x8001000180010000; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x80000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x80000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7fc00000; +- *((int*)& __m256_result[6]) = 0x7fc00000; +- *((int*)& __m256_result[5]) = 0x7fc00000; +- *((int*)& __m256_result[4]) = 
0x7fc00000; +- *((int*)& __m256_result[3]) = 0x7fc00000; +- *((int*)& __m256_result[2]) = 0x7fc00000; +- *((int*)& __m256_result[1]) = 0x7fc00000; +- *((int*)& __m256_result[0]) = 0x7fc00000; +- __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000001; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; +- __m256i_out = __lasx_xvsll_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff800080000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff008000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff008000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff008000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff008000000000; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff800080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff800080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 
0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0xfffffff5; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000001ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000001ff; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffffff5; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0010000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0010000100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0010000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0010000100000000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 
0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010000100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff800080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff800080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff80000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff80000000; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x8f8f8f8f8f8f8f8f; +- *((unsigned long*)& __m128i_result[0]) = 0x8f8f8f8f8f8f8f8f; +- __m128i_out = __lsx_vaddi_bu(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfsub_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8f8f8f8f8f8f8f8f; +- *((unsigned long*)& __m128i_op1[0]) = 0x8f8f8f8f8f8f8f8f; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010002; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op2[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op2[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0001808281820102; +- *((unsigned long*)& __m128i_result[0]) = 0x0001808201018081; +- __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001808281820102; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001808201018081; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0001008281820102; +- *((unsigned long*)& __m128i_result[0]) = 0x0001008201010081; +- __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 
0x00000000; +- *((int*)& __m128_op1[3]) = 0x80808080; +- *((int*)& __m128_op1[2]) = 0x80808080; +- *((int*)& __m128_op1[1]) = 0x80808080; +- *((int*)& __m128_op1[0]) = 0x80808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; +- __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_result[0]) = 0x4040404040404040; +- __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0007000100040102; +- *((unsigned long*)& __m128d_op0[0]) = 0x0003000100010101; +- *((unsigned long*)& __m128d_op1[1]) = 0x0007000100040102; +- *((unsigned 
long*)& __m128d_op1[0]) = 0x0003000100010101; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_w(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,-15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000fe000000fe; +- *((unsigned long*)& __m256i_result[2]) = 0x000000fe000000fe; +- *((unsigned long*)& __m256i_result[1]) = 0x000000fe000000fe; +- *((unsigned long*)& __m256i_result[0]) = 0x000000fe000000fe; +- __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000300000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x001c001c001c001c; +- *((unsigned long*)& __m128i_result[0]) = 0x001c001c001c001c; +- __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) 
= 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffe5ffe5ffe5ffe5; +- *((unsigned long*)& __m256i_result[2]) = 0xffe5ffe5ffe5ffe5; +- *((unsigned long*)& __m256i_result[1]) = 0xffe5ffe5ffe5ffe5; +- *((unsigned long*)& __m256i_result[0]) = 0xffe5ffe5ffe5ffe5; +- __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffeb; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffeb; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff800200000002; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff800200000002; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffeb; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffeb; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_h(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff800000000000; +- __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000300000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffdffffffff; +- *((unsigned 
long*)& __m128i_result[0]) = 0xfffffffffffeffff; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffeb; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffeb; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000015; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x5858585858585858; +- *((unsigned long*)& __m256i_result[2]) = 0x5858585858585858; +- *((unsigned long*)& __m256i_result[1]) = 0x5858585858585858; +- *((unsigned long*)& __m256i_result[0]) = 0x5858585858585858; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0xa7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1e1e1e0000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1e1e1e0000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1e1e1e0000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1e1e1e0000000000; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x2000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffbfbfbfc0; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbfbfbfc0; +- *((unsigned long*)& __m128i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m128i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& 
__m128i_result[1]) = 0xffbfffbfff7fff80; +- *((unsigned long*)& __m128i_result[0]) = 0xffbfffbfff7fff80; +- __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x54); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000300000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[1]) = 0x00000002fffffffb; +- *((unsigned long*)& __m128i_result[0]) = 0x000000010000fffb; +- __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000040804000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000040804000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000040a04000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000040a04000; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe6; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffe6; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe6; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffe6; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff800000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000040a04000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000040a04000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000040a04000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000040a04000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffa; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000002fffffffb; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x000000010000fffb; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000bffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x42); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xbffffffe; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_b(__m128i_op0,0x5); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0xe7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f8f7f8f7f8f7f8; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f8f7f8f7f8f7f8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf7f8f7f8f7f8f7f8; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xf7f8f7f8f7f8f7f8; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000bffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000bffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vclz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf7f7f7f7f7f7f7f8; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xf7f7f7f7f7f7f7f8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000002e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000004e; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0f000f000f000f00; 
+- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0f000f000f000f00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000101000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000101000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = 
__lsx_vabsd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffffffff; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; +- __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000500000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xffff0000; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xf7f7f7f7; +- *((int*)& __m256_op1[6]) = 0xf7f7f7f8; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xf7f7f7f7; +- *((int*)& __m256_op1[2]) = 0xf7f7f7f8; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = 
__lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0x3a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 
0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfff10000fff10000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xfff10000; +- *((int*)& __m256_op0[4]) = 0xfff10000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xfff10000; +- *((int*)& __m256_op0[0]) = 0xfff10000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xfff10000; +- *((int*)& __m256_result[4]) = 0xfff10000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xfff10000; +- *((int*)& __m256_result[0]) = 0xfff10000; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff1fffffff1; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000001ffe2000; +- *((unsigned long*)& __m256i_result[2]) = 0x001fe020001fe020; +- *((unsigned long*)& __m256i_result[1]) = 0x000000001ffe2000; +- *((unsigned long*)& __m256i_result[0]) = 0x001fe020001fe020; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff800000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xff800000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff80000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000001ffe2000; +- *((unsigned long*)& __m256d_op0[2]) = 0x001fe020001fe020; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000001ffe2000; +- *((unsigned long*)& __m256d_op0[0]) = 0x001fe020001fe020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff1fffffff1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvilvh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xfff10000; +- *((int*)& __m256_op0[4]) = 0xfff10000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xfff10000; +- *((int*)& __m256_op0[0]) = 0xfff10000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xfff10000; +- *((int*)& __m256_op1[4]) = 0xfff10000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xfff10000; +- *((int*)& __m256_op1[0]) = 0xfff10000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff88ff88; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xfff10000; +- *((int*)& __m256_op0[4]) = 0xfff10000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xfff10000; +- *((int*)& __m256_op0[0]) = 0xfff10000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xfff10000; +- *((int*)& __m256_op1[4]) = 0xfff10000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; +- __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff000000ff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0080000000800000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfff1000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfff1000000000000; +- __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000001ffe2000; +- *((unsigned long*)& __m256i_op0[2]) = 0x001fe020001fe020; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000001ffe2000; +- *((unsigned long*)& __m256i_op0[0]) = 0x001fe020001fe020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; +- __m256i_out = __lasx_xvsle_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xfff10000; +- *((int*)& __m256_op0[4]) = 0xfff10000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xfff10000; +- *((int*)& __m256_op0[0]) = 0xfff10000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000005; 
+- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000001ffe2000; +- *((unsigned long*)& __m256i_op1[2]) = 0x001fe020001fe020; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000001ffe2000; +- *((unsigned long*)& __m256i_op1[0]) = 0x001fe020001fe020; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff0020ff1f001f; +- *((unsigned long*)& __m256i_result[2]) = 0xffe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff0020ff1f001f; +- *((unsigned long*)& __m256i_result[0]) = 0xffe1ffe0ffe1ffe0; +- __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100f000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100f000ff; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_q(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vmini_hu(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fe200000fe1f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fe200000fe1f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000005; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x80000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8101010181010101; +- *((unsigned long*)& __m128i_result[0]) = 0x8101010181010101; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128d_op1[1]) = 0x8101010181010101; +- *((unsigned long*)& __m128d_op1[0]) = 0x8101010181010101; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x80000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8101010181010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x8101010181010101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xc0808000c0808000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc0808000c0808000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xc080800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xc080800000000000; +- __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc0808000c0808000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000003020302; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x001ffffe00200000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x001ffffe00200000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff0020ff1f001f; +- *((unsigned long*)& __m256i_op0[2]) = 0xffe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff0020ff1f001f; +- *((unsigned long*)& __m256i_op0[0]) = 0xffe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_result[2]) = 0xffe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_result[0]) = 0xffe1ffe0ffe1ffe0; +- __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8101010181010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x8101010181010101; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ff0000ff; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000fe200000fe1f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fe200000fe1f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x001ffffe00200000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x001ffffe00200000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fe200000fe1f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000fe200000fe1f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffffe20; +- *((int*)& __m256_op0[6]) = 0x001dfe1f; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xfffffe20; +- *((int*)& __m256_op0[2]) = 0x001dfe1f; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7efefefe82010201; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x7afafaf88a050a05; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc080800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc080800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7efefefe82010201; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x418181017dfefdff; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7efefefe82010201; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x418181017dfefdff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff81; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffff81; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff7c; +- __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op1[0]) = 0xffe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x2b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000003020302; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffff81; +- *((unsigned long*)& __m128i_result[1]) = 0x00000c0c00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x001ffffe00200000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x001ffffe00200000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0020001d001f; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- long_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); 
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000a00000009; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_q(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op0[2]) = 0xffe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op0[0]) = 0xffe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_result[2]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_result[0]) = 0x7fe1ffe0ffe1ffe0; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 
0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffc0ff80; +- *((int*)& __m128_op1[2]) = 0xff800000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffc0ff80ff800000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_wu_d(__m128i_op0,__m128i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000c0c00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffc00000ff800000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff0000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 
0xffffffffffffffff; +- __m128i_out = __lsx_vsat_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffc00000ff800000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0xffffffff; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000; +- __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe20; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001dfffffe1f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000c0c00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc0ff80ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x3); +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xfffffe20; +- *((int*)& __m256_op0[5]) = 0x0000001d; +- *((int*)& __m256_op0[4]) = 0xfffffe1f; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffe20001dfe1f; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff0000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000005; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00003fe000000000; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x00003fe000000000; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000190; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffc0ff80ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00003fe0; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00003fe0; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00003fe0; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00003fe0; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001400000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001400000000; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvbitrev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256d_op0[2]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256d_op0[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256d_op0[0]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00003fe000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00003fe000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128d_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_result[2]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_result[0]) = 0x7fe1ffe0ffe1ffe0; +- __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vsll_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op0[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ff1f001f; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffe1ffe0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff1f001f; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffe1ffe0; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffc020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffc020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001400000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001400000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_op1[1]) = 0x01ff0020ff1f001f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007fc0083fc7c007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007fc0083fc7c007; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x42); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000b0000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000b0000000b; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmini_d(__m256i_op0,-1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x003f60041f636003; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrlni_h_w(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x007fc0083fc7c007; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x007fc0083fc7c007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffc0003fffc0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffc0003fffc0; +- __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010100000101; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010100000101; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000020000; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x003f60041f636003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000003f00001f63; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000003f00001f63; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101030101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101030101; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffe1; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffe1; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffe1; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffe1; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffe1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101010101030101; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010101030101; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000fffa0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffa0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101000101010001; +- __m128i_out = __lsx_vsrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& 
__m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x01010001; +- *((int*)& __m128_op0[0]) = 0x01010001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00020000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00020000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00020000; +- *((int*)& __m128_result[1]) = 0x01010001; +- *((int*)& __m128_result[0]) = 0x01010001; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff10; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffc0003fffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffc0003fffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x007fc0083fc7c007; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x007fc0083fc7c007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f010700c70106; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f010700c70106; +- __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000000fffa0000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffa0000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256d_op0[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256d_op0[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000008; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffc0003fffc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffc0003fffc0; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x803f6004; +- *((int*)& __m256_op2[4]) = 0x1f636003; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x803f6004; +- *((int*)& __m256_op2[0]) = 0x1f636003; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x803f6004; +- *((int*)& __m256_result[4]) = 0x1f636003; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x803f6004; +- *((int*)& __m256_result[0]) = 0x1f636003; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_op1[2]) = 
0xff00000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ff0000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ff0000000000; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00002000; +- *((int*)& __m128_op0[2]) = 0x00002000; +- *((int*)& __m128_op0[1]) = 0x10000000; +- *((int*)& __m128_op0[0]) = 0x10000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000002; +- *((int*)& __m256_op0[4]) = 0x00000008; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000002; +- *((int*)& __m256_op0[0]) = 0x00000008; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x64800000; +- *((int*)& __m256_result[4]) = 0x64000000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x64800000; +- *((int*)& __m256_result[0]) = 0x64000000; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x71); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f010700c70106; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f010700c70106; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0106010601060106; +- *((unsigned long*)& __m256i_result[2]) = 0x0106010601060106; +- *((unsigned long*)& __m256i_result[1]) = 0x0106010601060106; +- *((unsigned long*)& __m256i_result[0]) = 0x0106010601060106; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x803f6004; +- *((int*)& __m256_op0[4]) = 0x1f636003; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x803f6004; +- 
*((int*)& __m256_op0[0]) = 0x1f636003; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x007f0107; +- *((int*)& __m256_op1[4]) = 0x00c70106; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x007f0107; +- *((int*)& __m256_op1[0]) = 0x00c70106; +- *((int*)& __m256_result[7]) = 0x7fc00000; +- *((int*)& __m256_result[6]) = 0x7fc00000; +- *((int*)& __m256_result[5]) = 0xbeff7cfd; +- *((int*)& __m256_result[4]) = 0x5e123f94; +- *((int*)& __m256_result[3]) = 0x7fc00000; +- *((int*)& __m256_result[2]) = 0x7fc00000; +- *((int*)& __m256_result[1]) = 0xbeff7cfd; +- *((int*)& __m256_result[0]) = 0x5e123f94; +- __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0106010601060106; +- *((unsigned long*)& __m256i_op0[2]) = 0x0106010601060106; +- *((unsigned long*)& __m256i_op0[1]) = 0x0106010601060106; +- *((unsigned long*)& __m256i_op0[0]) = 0x0106010601060106; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00011ffb0000bee1; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00011ffb0000bee1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001010600000106; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001010600000106; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000200000002000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1000000010000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0103000201030002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x3f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001010600000106; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001010600000106; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0103000201030002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_d(__m128i_op0,7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0103000201030002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101000101010001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000fe0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff00ffffff00ff; +- __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00011ffb0000bee1; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00011ffb0000bee1; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000003f003f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000003f003f; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000002; +- *((int*)& __m256_op0[4]) = 0x00000008; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000002; +- *((int*)& __m256_op0[0]) = 0x00000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f010700c70106; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f010700c70106; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000010211921; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000010211921; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) 
= 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_result[3]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_result[2]) = 0xff00000200000008; +- *((unsigned long*)& __m256i_result[1]) = 0xffffff00ffffff00; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000200000008; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000010100fe0101; +- *((unsigned long*)& __m128i_op2[0]) = 0xffff0200ffff01ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffc0ffc1; +- *((unsigned long*)& __m256i_op0[2]) = 0x003f00000000003f; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffc0ffc1; +- *((unsigned long*)& __m256i_op0[0]) = 0x003f00000000003f; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001fffe0001ffc0; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0001003e; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0001fffe0001ffc0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0001003e; +- __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000002ffffffff; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007f000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007f000000000000; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000002; +- *((int*)& __m256_op1[4]) = 0x00000008; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000002; +- *((int*)& __m256_op1[0]) = 0x00000008; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000010100fe0101; +- *((unsigned long*)& __m128d_op0[0]) = 0xffff0200ffff01ff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x0001010100fe0100; +- *((unsigned long*)& __m128d_result[0]) = 0xffff0200ffff01ff; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000008; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000003fe0000141e; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffc01ffffebe2; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_d(__m128i_op0,-11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0xffffffffffffffff; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-6); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000004; +- *((int*)& __m128_op1[0]) = 0x55555555; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00011ffb0000bee1; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00011ffb0000bee1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00011ffb0000bee1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00011ffb0000bee1; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000055555555; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000002ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000017fffffff; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007ffff001000300; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff0001000300; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_result[0]) = 0xf0003000f0003000; +- __m128i_out = __lsx_vslli_h(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_op1[0]) = 0xf0003000f0003000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000017fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000800000; +- *((unsigned long*)& __m128i_result[0]) = 0x003fffffff800000; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ff0000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000455555555; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff010300ff0103; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x555500adfffc5cab; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010100000100; +- __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x010003f00000ff00; +- *((unsigned long*)& __m128i_op0[0]) = 0x017f03000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x010003f00000ff00; +- *((unsigned long*)& __m128i_op1[0]) = 0x017f03000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000020; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x42800000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x42000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x42800000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x42000000; +- __m256_out = __lasx_xvffint_s_wu(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000017; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000017; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x01010101; +- *((int*)& __m128_op0[0]) = 0x00000100; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xc2fa0000; +- *((int*)& __m128_result[0]) = 0xc30d0000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000020; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000020; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc0000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xffc0000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000455555555; +- *((unsigned long*)& __m128i_result[1]) = 0xffc0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffc0000000000004; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003fffffff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000455555555; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000455555555; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x007f00ff007f00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x007f00ff007f00ff; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_w(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xc9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000158; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000158; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001580000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbsll_v(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0x0101ffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0x0101ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000455555555; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000001580000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffa800000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000157; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op1[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ac; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000157; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00067fff00047fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00027fff000080fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x00067fff00047fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00027fff000080fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x067f047f027f0080; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x067f047f027f0080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000015800000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xc); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010058; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000158; +- __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000158; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xffffffa8; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010058; +- *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000158; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000010; +- *((int*)& __m128_op0[2]) = 0x00100010; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000158; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x79); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010058; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010058; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000100010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010058; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001001100110068; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001001100110068; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001001100110068; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001001100110067; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001001100110068; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; +- __m128i_out = __lsx_vfclass_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) 
= 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_hu(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000fef0ff0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000fef0ff0; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandi_b(__m128i_op0,0xbd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000040004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000040004; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffint_d_lu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007f7f7f80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007f7f7f80; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- 
*((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x82); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000040000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000040000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000200; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000fef0ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000fef0ff0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x687a8373f249bc44; +- *((unsigned long*)& __m128i_op0[0]) = 0x7861145d9241a14a; +- *((unsigned long*)& __m128i_result[1]) = 0x0101000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0101030100010001; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[2]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[1]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[0]) = 0xfff1fff1fff1fff1; +- __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[3]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[2]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[1]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[0]) = 0xfff1fff1fff1fff1; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000020006; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000600; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000e000e000e000e; +- __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff1fff1fff1fff1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000040000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000040000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000020000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000020000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x39); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0101000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101030100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0080800000008000; +- *((unsigned long*)& __m128i_result[0]) = 0x0080818000008000; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000e000e; +- __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000e000e; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x01010001; +- *((int*)& __m128_op1[2]) = 0x00010001; +- *((int*)& __m128_op1[1]) = 0x01010301; +- *((int*)& __m128_op1[0]) = 0x00010001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000040000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000040000; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000040000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000040000; +- __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00040000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00040000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m128i_result[0]) = 0xf8f8f8f8f8f8f8f8; +- __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000e000e000e000e; +- __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_result[3]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000e0000000e00; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_op0[1]) = 
0x00000000000e000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000e000e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0101000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101030100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x2000200020002000; +- *((unsigned long*)& __m128i_result[0]) = 0x2000200020002000; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_hu(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_result[2]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_result[0]) = 0x0007000700070007; +- __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00010001; +- *((int*)& __m128_op0[2]) = 0x00010001; +- *((int*)& __m128_op0[1]) = 0x00010001; +- *((int*)& __m128_op0[0]) = 0x00010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrml_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_op0[2]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_op0[1]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000e000e000e000e; +- *((unsigned long*)& __m256i_result[3]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_result[2]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_result[1]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_result[0]) = 0x000a800b000a800b; +- __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x000e000e; +- *((int*)& __m256_op1[4]) = 0x000e000e; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x000e000e; +- *((int*)& __m256_op1[0]) = 0x000e000e; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0x98); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m128d_op1[0]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_op0[2]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_op0[1]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_op0[0]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0007000700070007; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m128i_op2[0]) = 0xf8f8f8f8f8f8f8f8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_op1[2]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_op1[1]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_op1[0]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000018803100188; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000018803100188; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000affff800b; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000affff800b; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000affff800b; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000affff800b; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000800; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000800; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000018803100188; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000018803100188; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000a0080000b00; +- *((unsigned long*)& __m256i_result[2]) = 0x00000a0080000b00; +- *((unsigned long*)& __m256i_result[1]) = 0x00000a0080000b00; +- *((unsigned long*)& __m256i_result[0]) = 0x00000a0080000b00; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000000e; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000e; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000c; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000e; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000e; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000440800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000440800; +- __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_op0[2]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_op0[1]) = 0x0003800400038004; +- *((unsigned long*)& __m256i_op0[0]) = 0x000a800b000a800b; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000e0010000e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000e0010000e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x4e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvmskgez_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0010000e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0010000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; +- __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0707070707070707; +- *((unsigned long*)& __m256i_result[2]) = 0x0707070707070707; +- *((unsigned long*)& __m256i_result[1]) = 0x0707070707070707; +- *((unsigned long*)& __m256i_result[0]) = 0x0707070707070707; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x27b9331b8e77ead9; +- *((unsigned long*)& __m128i_op0[0]) = 0x58d6bf1867ace738; +- *((unsigned long*)& __m128i_result[1]) = 0xe4cc6c9edfab6639; +- *((unsigned long*)& __m128i_result[0]) = 0x5afc6163b39ce19e; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144; +- __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffff800; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffff800; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000fffff800; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000fffff800; +- *((unsigned long*)& __m256i_result[3]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_result[2]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_result[1]) = 0xf800f800f800f800; +- *((unsigned long*)& __m256i_result[0]) = 0xf800f800f800f800; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x5); +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffff800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffff800; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000002080100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000002080100; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000001880310877e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000001880310877e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002080100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002080100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000008000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000a080100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000008000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000a080100; +- __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; +- __m128i_out = 
__lsx_vhsubw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffbfffffff8; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffbfffffff8; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010800; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fffff800; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fffff800; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001010800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001010800; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x07ffffff07ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x07ffffff07ffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x07ffffff07ffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x07ffffff07ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0x0ffffffe0ffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0x0ffffffe0ffffffe; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000a0010400a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000a0010400a; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff00000000; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000007f007f007f; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- 
+- *((unsigned long*)& __m128i_op0[1]) = 0xdd6156076967d8c9; +- *((unsigned long*)& __m128i_op0[0]) = 0x2e3ab5266375e71b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[1]) = 0x6eb12b0634b46c67; +- *((unsigned long*)& __m128i_result[0]) = 0x171d5a9531bb7390; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010800; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010800; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000014402080144; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000014402080144; +- __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000b; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000002070145; +- 
*((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000002070145; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xfffffffc; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xfffffffc; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xfffffffc; +- *((int*)& __m128_result[1]) = 0xffffffff; +- *((int*)& __m128_result[0]) = 0xfffffffc; +- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000400000004; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x1ab6021f72496458; +- *((unsigned long*)& __m128i_op1[0]) = 0x7750af4954c29940; +- *((unsigned long*)& __m128i_result[1]) = 0xe64afee18eb79ca8; +- *((unsigned long*)& __m128i_result[0]) = 0x89b051b7ac3e67c0; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op0[2]) = 
0x000000000000000b; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffdc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000400000004; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffbffffffd8; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffbfffffff8; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000008000b; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000008000b; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000b; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1ab6021f72496458; +- *((unsigned long*)& __m128i_op0[0]) = 0x7750af4954c29940; +- *((unsigned long*)& __m128i_op1[1]) = 0x1ab6021f72496458; +- *((unsigned long*)& __m128i_op1[0]) = 0x7750af4954c29940; +- *((unsigned long*)& __m128i_result[1]) = 0x6ad8ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x6ad8ffffffffffff; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000008000b; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000008000b; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000b; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000008000a; +- *((unsigned long*)& 
__m256i_result[2]) = 0x000000000000000a; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000008000a; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000a; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010800; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010800; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffefef800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffefef800; +- __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_d(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffefef800; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffefef800; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffefef800; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffefef800; +- *((unsigned long*)& __m256i_result[3]) = 0x0000008000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffefef800; +- *((unsigned long*)& __m256i_result[1]) = 0x0000008000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffefef800; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x27); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000010000000; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x00000000ffffff80; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffff80; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000430207f944; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000001f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000001f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000001f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000001f; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x000000200000001e; +- *((unsigned long*)& __m128i_result[0]) = 0x000000200000001e; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x38); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000001f; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000001f; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x0000001f; +- *((int*)& 
__m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x0000001f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m128i_result[0]) = 0xff01ff01ff01fc10; +- __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = 
__lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000001f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000001f; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x403f000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x403f000000000000; +- __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x45); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000003; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000003; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000080; +- __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x7e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ffffffe00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007f00ff00ff00fe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,-10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007f00ff00ff00fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x7ffffffe00000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x7ffffffe00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007f00ff00ff00fe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x1f9689fdb16cabbd; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x1f9689fdb16cabbd; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ffffffe00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ffffffe00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff0000; +- __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0xcd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000007f007f007f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0af57272788754ab; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000005e80; +- *((unsigned long*)& __m256i_op1[1]) = 0x0af57272788754ab; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000005e80; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000f0f0f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f0000007f; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000f0f0f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f0000007f; +- __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ffffffe00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7ffffffe00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x3a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_bu(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128d_op0[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128d_op1[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128d_op1[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[2]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0008; +- __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000017ffeffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000017ffeffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff0100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff0100000001; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op2[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op2[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffff00018d8b; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffff0100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffff0100000001; +- unsigned_int_result = 0x0000000000000000; +- 
unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x7); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff00007fff0000; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000001; +- *((int*)& __m128_op0[2]) = 0x7ffeffff; +- *((int*)& __m128_op0[1]) = 0x00000001; +- *((int*)& __m128_op0[0]) = 0x7ffeffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x003f0000003f0000; +- *((unsigned long*)& __m128i_result[0]) = 0x003f0000003f0000; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffff0100000001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffff0100000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x003f0000003f0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x003f0000003f0000; +- *((unsigned long*)& __m128i_result[1]) = 0x803e0000803e0000; +- *((unsigned long*)& __m128i_result[0]) = 0x803e0000803e0000; +- __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; +- __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000bdfef907bc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000bdfef907bc; +- __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x803e0000803e0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x803e0000803e0000; +- *((unsigned long*)& __m128i_result[1]) = 0x803bfffd803bfffd; +- *((unsigned long*)& __m128i_result[0]) = 0x803bfffd803bfffd; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0010511c54440437; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0010511c54440437; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0008; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffff0008; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[2]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0100010001000100; +- __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000008080800; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000008080800; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010511c54440437; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010511c54440437; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000103fca1bd; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000103fca1bd; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000103fca1bd; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000103fca1bd; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0010511c54440438; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0010511c54440438; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1d8000001d800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1d8000001d800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1d8000001d800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1d8000001d800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0366000003660000; +- *((unsigned long*)& __m128i_result[0]) = 0x0366000003660000; +- __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000bdfef907bc; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000bdfef907bc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,-9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffe0000fffe0000; +- *((unsigned long*)& __m128i_result[1]) = 0x7777777777777777; +- *((unsigned long*)& __m128i_result[0]) = 0xffff7777ffff7777; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x77); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x000000bd; +- *((int*)& __m256_op0[4]) = 0xfef907bc; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x000000bd; +- *((int*)& __m256_op0[0]) = 0xfef907bc; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x62d2acee; +- *((int*)& __m256_result[4]) = 0x7fc00000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x62d2acee; +- *((int*)& __m256_result[0]) = 0x7fc00000; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100004300000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100004300000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[2]) = 0xff0000bd00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[0]) = 0xff0000bd00000000; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x01000100; +- *((int*)& __m256_op0[6]) = 0x01000100; +- *((int*)& __m256_op0[5]) = 0x01000100; +- *((int*)& __m256_op0[4]) = 0x01000100; +- *((int*)& __m256_op0[3]) = 0x01000100; +- *((int*)& __m256_op0[2]) = 0x01000100; +- *((int*)& __m256_op0[1]) = 0x01000100; +- *((int*)& __m256_op0[0]) = 0x01000100; +- *((int*)& __m256_op1[7]) = 0x7f800000; +- *((int*)& __m256_op1[6]) = 0x7f800000; +- *((int*)& __m256_op1[5]) = 0x62d2acee; +- *((int*)& __m256_op1[4]) = 0x7fc00000; +- *((int*)& __m256_op1[3]) = 0x7f800000; +- *((int*)& __m256_op1[2]) = 0x7f800000; +- *((int*)& __m256_op1[1]) = 0x62d2acee; +- *((int*)& __m256_op1[0]) = 0x7fc00000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000043; +- *((int*)& __m256_op0[4]) = 0x0207f944; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000043; +- *((int*)& __m256_op0[0]) = 0x0207f944; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x8c7fc73a; +- *((int*)& __m128_op0[2]) = 0x137e54af; +- *((int*)& 
__m128_op0[1]) = 0xbc84cf6f; +- *((int*)& __m128_op0[0]) = 0x76208329; +- *((int*)& __m128_result[3]) = 0x7fc00000; +- *((int*)& __m128_result[2]) = 0x297f29fe; +- *((int*)& __m128_result[1]) = 0x7fc00000; +- *((int*)& __m128_result[0]) = 0x5acab5a5; +- __m128_out = __lsx_vfsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[2]) = 0xff00010001000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[0]) = 0xff00010001000100; +- __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x7b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x22); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[2]) = 0x0100004300000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x0100004300000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op2[2]) = 0xff00010001000100; +- *((unsigned long*)& __m256i_op2[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op2[0]) = 0xff00010001000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[2]) = 0x01ffff4300ffff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[0]) = 0x01ffff4300ffff00; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80008000ec82ab51; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000800089e08000; +- int_result = 0xffffffff89e08000; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010511c54440438; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010511c54440438; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000777777777777; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff7777ffff7777; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000003bbbbbbbbbb; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x45); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000430207f944; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000086000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00040ff288000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000086000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00040ff288000000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000777777777777; +- *((unsigned long*)& __m128d_op0[0]) = 0xffff7777ffff7777; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001b; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op1[2]) = 0x01ffff4300ffff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op1[0]) = 0x01ffff4300ffff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000008000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000008000000100; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x3f800000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00010001000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00010001000100; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffc0800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000008080600; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x3f800000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000086000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00040ff288000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000086000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00040ff288000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x5555555555555555; +- *((unsigned long*)& __m256i_op1[2]) = 0x5555555555555555; +- *((unsigned long*)& __m256i_op1[1]) = 0x5555555555555555; +- *((unsigned long*)& __m256i_op1[0]) = 0x5555555555555555; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fc300000fc40; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x5555555555555555; +- *((unsigned long*)& __m256i_op0[2]) = 0x5555555555555555; +- *((unsigned long*)& __m256i_op0[1]) = 0x5555555555555555; +- *((unsigned long*)& __m256i_op0[0]) = 0x5555555555555555; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x4545454545454545; +- *((unsigned long*)& __m256i_result[2]) = 0x4545454545454545; +- *((unsigned long*)& __m256i_result[1]) = 0x4545454545454545; +- *((unsigned long*)& __m256i_result[0]) = 0x4545454545454545; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x4d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00010001000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00010001000100; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x5a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve0_q(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[2]) = 0x01ffff4300ffff00; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x01ffff4300ffff00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff00000000; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff000003c0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff000003c0; +- __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000008080600; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff000003c0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff000003c0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7c030000ffc4; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7c030000ffc4; +- __m256i_out = 
__lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7ffeffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7ffeffffffff; +- __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x7fff7ffe; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x7fff7ffe; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000002; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000002; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000002; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000002; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0xffffffff; +- *((int*)& __m256_op2[4]) = 0xffffffff; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0xffffffff; +- *((int*)& __m256_op2[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& 
__m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7ffeffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7ffeffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f007bfffffffb; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f007bfffffffb; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x01ffff4300fffeff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfe0000bcff000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x01ffff4300fffeff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfe0000bcff000100; +- *((unsigned long*)& __m256i_result[3]) = 0x81ff00bd80ff0101; +- *((unsigned long*)& __m256i_result[2]) = 0x01ff00bd00ff0101; +- *((unsigned long*)& __m256i_result[1]) = 0x81ff00bd80ff0101; +- *((unsigned long*)& __m256i_result[0]) = 0x01ff00bd00ff0101; +- __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b; +- int_op1 = 0xffffffff89e08000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001b0000001b; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001b0000001b; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fff0018; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0000001b; +- *((int*)& __m128_op0[2]) = 0x0000001b; +- *((int*)& __m128_op0[1]) = 0x0000001b; +- *((int*)& __m128_op0[0]) = 0x0000001b; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x0000001b; +- *((int*)& __m128_result[2]) = 0x0000001b; +- *((int*)& __m128_result[1]) = 0x0000001b; +- *((int*)& __m128_result[0]) = 0x0000001b; 
+- __m128_out = __lsx_vfadd_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op1[2]) = 0x01ffff4300ffff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op1[0]) = 0x01ffff4300ffff00; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ff003f003f00; +- *((unsigned long*)& __m256i_result[2]) = 0xff0101fd00010100; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff003f003f00; +- *((unsigned long*)& __m256i_result[0]) = 0xff0101fd00010100; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff003f003f00; +- *((unsigned long*)& __m256i_op0[2]) = 0xff0101fd00010100; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff003f003f00; +- *((unsigned long*)& __m256i_op0[0]) = 0xff0101fd00010100; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00ff003f003f00; +- *((unsigned long*)& __m256i_op1[2]) = 0xff0101fd00010100; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00ff003f003f00; +- *((unsigned long*)& __m256i_op1[0]) = 0xff0101fd00010100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xff00ff00; +- *((int*)& __m256_op0[6]) = 0x3f003f00; +- *((int*)& __m256_op0[5]) = 0xff0101fd; +- *((int*)& __m256_op0[4]) = 0x00010100; +- *((int*)& __m256_op0[3]) = 0xff00ff00; +- *((int*)& __m256_op0[2]) = 0x3f003f00; +- *((int*)& __m256_op0[1]) = 0xff0101fd; +- *((int*)& __m256_op0[0]) = 0x00010100; +- *((int*)& __m256_op1[7]) = 0x01ffff43; +- *((int*)& __m256_op1[6]) = 0x00fffeff; +- *((int*)& __m256_op1[5]) = 0xfe0000bc; +- *((int*)& __m256_op1[4]) = 0xff000100; +- *((int*)& __m256_op1[3]) = 0x01ffff43; +- *((int*)& __m256_op1[2]) = 0x00fffeff; +- *((int*)& __m256_op1[1]) = 0xfe0000bc; +- *((int*)& __m256_op1[0]) = 0xff000100; +- *((unsigned long*)& __m256i_result[3]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fc00fc00; +- *((unsigned long*)& __m256i_result[1]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fc00fc00; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x2c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[2]) = 0x01ffff4300ffff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x01ffff4300ffff00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100000000; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x2e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffc0800000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x6f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4545454545454545; +- *((unsigned long*)& __m256i_op0[2]) = 0x4545454545454545; +- *((unsigned long*)& __m256i_op0[1]) = 0x4545454545454545; +- *((unsigned long*)& __m256i_op0[0]) = 0x4545454545454545; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x000000000000001b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000001b; +- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000001b0000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000001b0000; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000fc300000fc40; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000001b; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000001b0000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000001b0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000001b001b; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- 
*((int*)& __m128_op0[0]) = 0xc0800000; +- *((int*)& __m128_op1[3]) = 0x0000001b; +- *((int*)& __m128_op1[2]) = 0x0000001b; +- *((int*)& __m128_op1[1]) = 0x0000001b; +- *((int*)& __m128_op1[0]) = 0x0000001b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x007f007bfffffffb; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x007f007bfffffffb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000010000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000010000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffc0800000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffc0800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffc0800000; +- __m128i_out = 
__lsx_vmaddwod_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000007fff0018; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000003fff800c; +- __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000010000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000010000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffeffff10000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffeffff10000000; +- __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fc00fc00; +- *((unsigned long*)& __m256i_op1[1]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fc00fc00; +- *((unsigned long*)& __m256i_result[3]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fc00fc00; +- *((unsigned long*)& __m256i_result[1]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x00000000fc00fc00; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffeffff10000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffeffff10000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfc003802; +- *((int*)& __m256_op0[6]) = 0xfc000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xfc00fc00; +- *((int*)& __m256_op0[3]) = 0xfc003802; +- *((int*)& __m256_op0[2]) = 0xfc000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xfc00fc00; +- *((int*)& __m256_result[7]) = 0x82ff902d; +- *((int*)& __m256_result[6]) = 0x83000000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x82fe0bd9; +- *((int*)& __m256_result[3]) = 0x82ff902d; +- *((int*)& __m256_result[2]) = 0x83000000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x82fe0bd9; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0018; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vclz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffeffff10000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffeffff10000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7ffffffffffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7ffffffffffffffe; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x03802fc000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x03802fc000000000; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_wu(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xd5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& 
__m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000; +- __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xfc003802fc000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x7ffffffffffffffe; +- *((unsigned long*)& __m256d_op1[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x7ffffffffffffffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff00010001; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff00010001; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xd2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x82ff902d83000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f80000082fe0bd9; +- *((unsigned long*)& __m256i_op0[1]) = 0x82ff902d83000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f80000082fe0bd9; +- *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9; +- *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9; +- *((unsigned long*)& __m256i_result[3]) = 0xc008fa01c0090000; +- *((unsigned long*)& __m256i_result[2]) = 0x3f804000c008f404; +- *((unsigned long*)& __m256i_result[1]) = 0xc008fa01c0090000; +- *((unsigned long*)& __m256i_result[0]) = 0x3f804000c008f404; +- __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x03802fc000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x03802fc000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& 
__m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; +- __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ffffffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ffffffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9; +- *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000001; +- __m256i_out = __lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc008fa01c0090000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3f804000c008f404; +- *((unsigned long*)& __m256i_op0[1]) = 0xc008fa01c0090000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3f804000c008f404; +- *((unsigned long*)& __m256i_op1[3]) = 0x82ff902d83000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f80000082fe0bd9; +- *((unsigned long*)& __m256i_op1[1]) = 0x82ff902d83000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f80000082fe0bd9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xc0090000c0200060; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xc0090000c0200060; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc0090000c0200060; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc0090000c0200060; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f0060; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f0060; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; +- __m128i_out = __lsx_vsadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc008fa01c0090000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3f804000c008f404; +- *((unsigned long*)& __m256i_op0[1]) = 0xc008fa01c0090000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3f804000c008f404; +- *((unsigned long*)& __m256i_result[3]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_result[2]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_result[1]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_result[0]) = 0x001fc0200060047a; +- __m256i_out = __lasx_xvsrai_d(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x03802fc000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x03802fc000000000; +- *((int*)& __m256_result[7]) = 0x38600000; +- *((int*)& __m256_result[6]) = 0x3df80000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x38600000; +- *((int*)& __m256_result[2]) = 0x3df80000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvtl_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000400028000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0xd9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_op0[1]) = 
0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_result[2]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_result[1]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_result[0]) = 0x001fc0200060047a; +- __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f0060; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f0060; +- __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400028000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0x000000020001c020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000022; +- __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_result[3]) = 0xfee1057c01e10581; +- *((unsigned long*)& __m256i_result[2]) = 0x011ec1210161057b; +- *((unsigned long*)& __m256i_result[1]) = 0xfee1057c01e10581; +- *((unsigned long*)& __m256i_result[0]) = 0x011ec1210161057b; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002008360500088; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000400028000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_result[3]) = 0x047a047a047a047a; +- *((unsigned long*)& __m256i_result[2]) = 0x047a047a047a047a; +- *((unsigned long*)& __m256i_result[1]) = 0x047a047a047a047a; +- *((unsigned long*)& __m256i_result[0]) = 0x047a047a047a047a; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002008360500088; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000008; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000c; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ca0000fff80000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00ca0000fff80000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff3; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- int_result = 0xffffffffffffffff; +- int_out = __lsx_vpickve2gr_h(__m128i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x55); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f0000007f0060; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007f0000007f0060; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00f7000000f70006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00f7000000f70006; +- __m256i_out = __lasx_xvrotri_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op0[2]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op0[0]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fffe00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fffe00000000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffff3; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000008; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000088; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000008; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000088; +- __m128_out = 
__lsx_vfmin_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ca0000fff80000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ca0000fff80000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x386000003df80000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x386000003df80000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x36); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fffe00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fffe00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x5fa0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x5fa0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x386000003df80000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ca0000fff80000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ca0000fff80000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x381800007af80000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x381800007af80000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002008300500088; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000088; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0f00204000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0f00204000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xf3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x5fa00000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x5fa00000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) 
= 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op1[2]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffe0047d00e00480; +- *((unsigned long*)& __m256i_op1[0]) = 0x001fc0200060047a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xe07de0801f20607a; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00f3009500db00ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00f3009500db00ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000003cc0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000003cc0; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x6a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x0c6a2400; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x0f002040; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x0c6a2400; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x0f002040; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x5fa0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x5fa0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0f00204000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0f00204000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x04a3000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x04a3000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0f00204000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0c6a240000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0f00204000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x52525252; +- *((int*)& __m128_op0[2]) = 0xadadadad; +- *((int*)& __m128_op0[1]) = 0x52525252; +- *((int*)& __m128_op0[0]) = 0xadadadad; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0xadadadad; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0xadadadad; +- __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrintrne_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003cc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003cc0; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000003cc0; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000003cc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x5fa0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x5fa0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003cc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003cc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[2]) = 0x000000081f20607a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[0]) = 0x000000081f20607a; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; +- *((unsigned long*)& __m128i_result[1]) = 0xfbfbfbfbadadadad; +- *((unsigned long*)& __m128i_result[0]) = 0xfbfbfbfbadadadad; +- __m128i_out = __lsx_vmini_b(__m128i_op0,-5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x52525252adadadad; +- *((unsigned long*)& __m128i_op1[0]) = 0x52525252adadadad; +- *((unsigned long*)& __m128i_result[1]) = 0x52525252adadadad; +- *((unsigned long*)& __m128i_result[0]) = 0x52525252adadadad; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000adadadad; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000adadadad; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x800000007fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x800000007fffffff; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0df9f8e; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0df9f8e; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffe0df9f8e; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffe0df9f8e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00f7000000f70006; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00f7000000f70006; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x52525252adadadad; +- *((unsigned long*)& __m128i_op0[0]) = 0x52525252adadadad; +- *((unsigned long*)& __m128i_op1[1]) = 0x800000007fffffff; +- *((unsigned long*)& __m128i_op1[0]) = 
0x800000007fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x5fa00000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x5fa00000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000004; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00007f95; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000004; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00007f95; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000adadadad; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000adadadad; +- *((unsigned long*)& __m128i_result[1]) = 0xadadadadadadadad; +- *((unsigned long*)& __m128i_result[0]) = 0xadadadadadadadad; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0df9f8e; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0df9f8e; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffe0df9f8f; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffe0df9f8f; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffffffb; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffffffb; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x52525252adadadad; +- *((unsigned long*)& __m128i_op1[0]) = 0x52525252adadadad; +- *((unsigned long*)& __m128i_op2[1]) = 0x800000007fffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x800000007fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00adadad00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00adadad00000000; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x52525252adadadad; +- *((unsigned long*)& __m128i_op0[0]) = 0x52525252adadadad; +- *((unsigned long*)& 
__m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x5b5b5b5aa4a4a4a6; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x5b5b5b5aadadadad; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007fffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x002cffacffacffab; +- *((unsigned long*)& __m128i_result[1]) = 0x0000007f00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vexth_hu_bu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0001fffa; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe00018069; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0001fffa; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe00018069; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff01fffffffeff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff01fffffffaff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff01fffffffeff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff01fffffffaff; +- __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x5b5b5b5aadadadad; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000052525253; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffa; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00018069; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0001fffe0001fffa; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00018069; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000002000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000002000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x64); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xe07de080; +- *((int*)& __m256_op0[4]) = 0x1f20607a; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xe07de080; +- *((int*)& __m256_op0[0]) = 0x1f20607a; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xe07de080; +- *((int*)& __m256_op1[4]) = 0x1f20607a; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xe07de080; +- *((int*)& __m256_op1[0]) = 0x1f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000007f00ff00ff; +- *((unsigned long*)& __m128d_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3ff0000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7ffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7ffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7ffffffe; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x3fffffff3ffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x3fffffff3ffffffe; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x800000007fffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x800000007fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x003f0000ffffffff; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7ffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7ffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffe4866c86; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe4866c86; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000002000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000002000000; +- __m128i_out = __lsx_vsrar_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xe07de080; +- *((int*)& __m256_op1[4]) = 0x1f20607a; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xe07de080; +- *((int*)& __m256_op1[0]) = 0x1f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x207fffff22bd04fb; +- *((unsigned long*)& __m128i_op0[0]) = 0x207fffff22bd04fb; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000002000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000002000000; +- *((unsigned long*)& __m128i_result[1]) = 0x207fffff22bd04fa; +- *((unsigned long*)& __m128i_result[0]) = 0x207fffff22bd04fa; +- __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe07de080; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000001f20607a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe07de080; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000001f20607a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0xfdfdfdfdfdfdfdfd; +- *((unsigned long*)& __m256i_result[2]) = 0xe27fe2821d226278; +- *((unsigned long*)& __m256i_result[1]) = 0xfdfdfdfdfdfdfdfd; +- *((unsigned long*)& __m256i_result[0]) = 0xe27fe2821d226278; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x1f831f80e0e09f86; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x1f831f80e0e09f86; +- __m256i_out = __lasx_xvdiv_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000003effff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000003effff; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffff7ffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7ffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff81010102; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfdfdfdfdfdfdfdfd; +- *((unsigned long*)& __m256i_op0[2]) = 0xe27fe2821d226278; +- *((unsigned long*)& __m256i_op0[1]) = 0xfdfdfdfdfdfdfdfd; +- *((unsigned long*)& __m256i_op0[0]) = 0xe27fe2821d226278; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x401fadf8fbfbfbfb; +- *((unsigned long*)& __m128i_result[0]) = 0x1c1f2145fbfbfbfb; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff0000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff0000ffffffff; +- __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x38); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xe07de0801f20607a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x01ff01ff01c0003e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x01ff01ff01c0003e; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x01ff01ff01c0003e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x01ff01ff01c0003e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000100ff000100ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000100c00000003e; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x441ba9fcffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x181b2541ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xbbe5560400010001; +- *((unsigned long*)& __m128i_result[0]) = 0xe7e5dabf00010001; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbbe5560400010001; +- *((unsigned long*)& __m128i_op0[0]) = 0xe7e5dabf00010001; +- *((unsigned long*)& __m128i_result[1]) = 0x000b000500010001; +- *((unsigned long*)& __m128i_result[0]) = 0x000b000c00010001; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff81010102; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00ff0000; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 
0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00ff0000; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00ff0000; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00ff0000; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fc0010181020103; +- *((unsigned long*)& __m128i_result[0]) = 0x7fc0ffff81020103; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbbe5560400010001; +- *((unsigned long*)& __m128i_op0[0]) = 0xe7e5dabf00010001; +- *((unsigned long*)& __m128i_op1[1]) = 0xbbe5560400010001; +- *((unsigned long*)& __m128i_op1[0]) = 0xe7e5dabf00010001; +- *((unsigned long*)& __m128i_result[1]) = 0xe7e5560400010001; +- *((unsigned long*)& __m128i_result[0]) = 0xe7e5dabf00010001; +- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xf3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xdcec560380000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x08ec7f7f80000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_op2[1]) = 0x32d8f0a905b6c59b; +- *((unsigned long*)& __m128i_op2[0]) = 0x322a52fc2ba83b96; +- *((unsigned long*)& __m128i_result[1]) = 0xaa14efac3bb62636; +- *((unsigned long*)& __m128i_result[0]) = 0xd6c22c8353a80d2c; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_result[1]) = 0x03ff0101fc010102; +- *((unsigned long*)& __m128i_result[0]) = 0x03fffffffc010102; +- __m128i_out = __lsx_vsat_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; +- 
*((unsigned long*)& __m128i_result[1]) = 0x00000fffffffe000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000102020204000; +- __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xaa14efac3bb62636; +- *((unsigned long*)& __m128i_op0[0]) = 0xd6c22c8353a80d2c; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000300000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0003000000010000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ff0000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ff0000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x03ff0101fc010102; +- *((unsigned long*)& __m128i_op0[0]) = 0x03fffffffc010102; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff010181010102; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff81010102; +- *((unsigned long*)& __m128i_result[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfeffffffffffffff; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x7fff0101; +- *((int*)& __m128_op0[2]) = 0x81010102; +- *((int*)& __m128_op0[1]) = 0x7fffffff; +- *((int*)& __m128_op0[0]) = 0x81010102; +- *((int*)& __m128_op1[3]) = 0x00000fff; +- *((int*)& __m128_op1[2]) = 0xffffe000; +- *((int*)& __m128_op1[1]) = 0x00001020; +- *((int*)& __m128_op1[0]) = 0x20204000; +- *((int*)& __m128_result[3]) = 0x7fff0101; +- *((int*)& __m128_result[2]) = 0xffffe000; +- *((int*)& __m128_result[1]) = 0x7fffffff; +- *((int*)& __m128_result[0]) = 0xa0204000; +- __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- 
+- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00f7000000f70007; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00f7000000f70007; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0xe7e5560400010001; +- *((unsigned long*)& __m128d_op1[0]) = 0xe7e5dabf00010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00800000; +- *((int*)& __m128_op0[0]) = 0x00800000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00800000; +- *((int*)& __m128_op1[0]) = 0x00800000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfeffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xfeffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000fff; +- *((int*)& __m128_op1[2]) = 0xffffe000; +- *((int*)& __m128_op1[1]) = 0x00001020; +- *((int*)& __m128_op1[0]) = 0x20204000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffefffefffeffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffefffefffeffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff; +- __m256i_out = __lasx_xvssub_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000fff; +- *((int*)& __m128_op1[2]) = 0xffffe000; +- *((int*)& __m128_op1[1]) = 0x00001020; +- *((int*)& __m128_op1[0]) = 0x20204000; +- *((int*)& __m128_result[3]) = 0x80000fff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0x80001020; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x000100010001fffe; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x000100010001fffe; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000700000004e000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0003000000012020; +- *((unsigned long*)& __m128i_result[1]) = 0x0038000000051fff; +- *((unsigned long*)& __m128i_result[0]) = 0x003c000000022021; +- __m128i_out = __lsx_vabsd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff0000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff0000ffffffff; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskgez_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000005500000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001005500020000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000005500000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001005500020000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000100010001fffe; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000100010001fffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000005500000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000005400000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000005500000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000005400000002; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xfdfcfda8; +- *((int*)& __m256_op0[5]) = 0x0000e282; +- *((int*)& __m256_op0[4]) = 0x1d20ffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xfdfcfda8; +- *((int*)& __m256_op0[1]) = 0x0000e282; +- *((int*)& __m256_op0[0]) = 0x1d20ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000700000004e000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000000012020; +- *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000e00a18f5; +- *((unsigned long*)& __m128i_op2[0]) = 0x000000002023dcdc; +- *((unsigned long*)& __m128i_result[1]) = 0x000700000004e000; +- *((unsigned long*)& __m128i_result[0]) = 0x0003000000012020; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) 
= 0x7fff0101ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000; +- *((unsigned long*)& __m128i_result[1]) = 0x001f7fc100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x001f7fff00000000; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0038000000051fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x003c000000022021; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff0101ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f370101ff04ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7f3bffffa0226021; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000fffffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000102020204000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfefff00000001fff; +- __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000100010001fffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000100010001fffe; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000005500000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001005500020000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000005500000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001005500020000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7fff0101ffffe000; +- *((unsigned long*)& __m128d_op0[0]) = 0x7fffffffa0204000; +- *((unsigned long*)& __m128d_op1[1]) = 0x7f370101ff04ffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x7f3bffffa0226021; +- *((unsigned long*)& __m128d_result[1]) = 0x7fff0101ffffe000; +- *((unsigned long*)& __m128d_result[0]) = 0x7fffffffa0204000; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x003f000400000003; +- *((unsigned long*)& __m128i_result[0]) = 0x003f000400000003; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0effeffefdffa1e0; +- *((unsigned long*)& __m128i_op0[0]) = 0xe6004c5f64284224; +- *((unsigned long*)& __m128i_op1[1]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfeffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000000010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f000400000003; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f000400000003; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000400004; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000003f0004; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001f7fc100000404; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000002a000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fff0101ffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffa0204000; +- *((unsigned long*)& __m128i_result[1]) = 0xffe1ffc100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000400000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefff00000001fff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffe1ffc100000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000400000; +- *((unsigned long*)& __m128i_result[1]) = 0xffe1ffc100000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfefff00000401fff; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffe1ffc100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000400000; +- *((int*)& __m128_result[3]) = 0xfffc2000; +- *((int*)& __m128_result[2]) = 0xfff82000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003ef89df07f0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003ec0fc0fbfe001; +- *((unsigned long*)& __m128i_op1[1]) = 0x3ff800ff2fe6c00d; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff40408ece0e0de; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000045340a6; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000028404044; +- *((unsigned long*)& __m128i_op1[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffff000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffff000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffff000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffff000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00f7000000f70006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00f7000000f70006; +- *((unsigned long*)& __m256d_result[3]) = 0x416ee00000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x416ee000c0000000; +- *((unsigned long*)& __m256d_result[1]) = 0x416ee00000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x416ee000c0000000; +- __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000fdfcfda8; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000e2821d20ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000fdfcfda8; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000e2821d20ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff7f810100001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x001fffc0ffffe001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000002259662; +- *((unsigned long*)& __m128i_op1[0]) = 0xc4dbe60354005d25; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f01000000f8ff00; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsubwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000045340a6; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000028404044; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000fffffffe000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000102020204000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x045340a628404044; +- __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& 
__m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0xffffe000; +- *((int*)& __m128_result[0]) = 0xffffe000; +- __m128_out = __lsx_vfcvtl_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff007fff810001; +- *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandi_b(__m256i_op0,0xcc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff007fff810001; +- *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6; +- *((unsigned long*)& __m128i_op1[1]) = 0xff7f810100001000; +- *((unsigned long*)& __m128i_op1[0]) = 0x001fffc0ffffe001; +- *((unsigned long*)& __m128i_result[1]) = 0xff7f810100001000; +- *((unsigned long*)& __m128i_result[0]) = 0x000400530050ffa6; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff000ff6220c0c1; +- *((unsigned long*)& __m128i_op0[0]) = 0xffe8081000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000007ff000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff7f810100001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000400530050ffa6; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff007fff810001; +- *((unsigned long*)& __m128i_op1[0]) = 0x000400530050ffa6; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffff811001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000a1ff4c; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0002a000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x0002a000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000000002a000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000060a3db; +- *((unsigned long*)& __m128i_op0[0]) = 0xa70594c000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ff9f5c25; +- *((unsigned long*)& __m128i_result[0]) = 0x58fa6b4000000000; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000007ff000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000a1ff4c; +- *((unsigned long*)& __m128i_result[1]) = 0x000300037ff000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0003000300a10003; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x045340a628404044; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x7ff000ff6220c0c1; +- *((unsigned long*)& __m128d_op1[0]) = 0xffe8081000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f0000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000300037ff000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0003000300a10003; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_wu_d(__m128i_op0,__m128i_op1,0x3c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7ff000ff6220c0c1; +- *((unsigned long*)& __m128i_op0[0]) = 0xffe8081000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7ff000ff6220c0c1; +- *((unsigned long*)& __m128i_op1[0]) = 0xffe8081000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0xb110606000000000; +- __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff9f5c25; +- *((unsigned long*)& __m128i_op0[0]) = 0x58fa6b4000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ff9f5c25; +- *((unsigned long*)& __m128i_op1[0]) = 0x58fa6b4000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000020; +- __m128i_out = __lsx_vclz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_result[2]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000080800000808; +- *((unsigned long*)& __m256i_result[0]) = 0x0000080800000808; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_result[2]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_result[1]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_result[0]) = 0xf3f3f3f3f3f3f3f3; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xf3f3f3f3; +- *((int*)& __m256_op0[6]) = 0xf3f3f3f3; +- *((int*)& __m256_op0[5]) = 0xf3f3f3f3; +- *((int*)& __m256_op0[4]) = 0xf3f3f3f3; +- *((int*)& __m256_op0[3]) = 0xf3f3f3f3; +- *((int*)& __m256_op0[2]) = 0xf3f3f3f3; +- *((int*)& __m256_op0[1]) = 0xf3f3f3f3; +- *((int*)& __m256_op0[0]) = 0xf3f3f3f3; +- *((int*)& __m256_op1[7]) = 0xf3f3f3f3; +- *((int*)& __m256_op1[6]) = 0xf3f3f3f3; +- *((int*)& __m256_op1[5]) = 0xf3f3f3f3; +- *((int*)& __m256_op1[4]) = 0xf3f3f3f3; +- *((int*)& __m256_op1[3]) = 0xf3f3f3f3; +- *((int*)& __m256_op1[2]) = 0xf3f3f3f3; +- *((int*)& __m256_op1[1]) = 0xf3f3f3f3; +- *((int*)& __m256_op1[0]) = 0xf3f3f3f3; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned 
long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000080800000808; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_op0[2]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_op0[1]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_op0[0]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xf3f3f3f3f3f3f4f3; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xf3f3f3f3f3f3f4f3; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff800fff01; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff001ffe02; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x000300037ff000ff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0003000300a10003; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff800fff01; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000007ff000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x1); +- *((unsigned long*)& __m128d_op0[1]) = 0x000300037ff000ff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0003000300a10003; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000007ff000ff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0003000300000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0003000300a10003; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0003000300000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0003000300a10003; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffcfffd00000000; +- __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvth_s_h(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffdfffe80008000; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0xffeffff4; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x000000007ff000ff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffe80008000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe2; +- *((unsigned long*)& __m128i_result[0]) = 0xfffdfffe80007fe2; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf3f3f3f3f3f3f4f3; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf3f3f3f3f3f3f4f3; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000f3f3f4f3; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000f3f3f4f3; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000300037ff000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000300a10003; +- *((unsigned long*)& __m128i_op1[1]) = 0x000300037ff000ff; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0003000300a10003; +- *((unsigned long*)& __m128i_op2[1]) = 0x000000007ff000ff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x26); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000007ff000ff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x7ff000ff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrml_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000004000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000004000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_d(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000040; +- *((int*)& __m256_op0[6]) = 0x00000020; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000040; +- *((int*)& __m256_op0[2]) = 0x00000020; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000004000000020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000004000000020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xf8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_du(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_d_q(__m128i_op0,__m128i_op1,0x58); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff7fffffff7; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,-9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4000400040004000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffff7fffffff7; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff700000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff7fffffff7; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& 
__m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& 
__m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_h(__m128i_op0,-7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080700000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080700000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffefffe; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fffefffe; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffe0004fffe0004; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0042003e0042002f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001fffc0001fffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0042003e0042002f; +- *((unsigned long*)& __m128i_result[0]) = 0x0001fffc0001fffc; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffe0004fffe0004; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x4b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 
0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000007070707; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc1bdceee242070db; +- *((unsigned long*)& __m128i_op0[0]) = 0xe8c7b756d76aa478; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3f433212dce09025; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc; +- *((unsigned long*)& 
__m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffbeffc2ffbeffd1; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0042003e0042002f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001fffc0001fffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0042003e0042002f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001fffc0001fffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0707070707070707; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0707070707070707; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000001fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0018001800180018; +- *((unsigned long*)& __m256i_result[2]) = 0x0018001800180018; +- *((unsigned long*)& __m256i_result[1]) = 0x0018001800180018; +- *((unsigned long*)& __m256i_result[0]) = 0x0018001800180018; +- __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_result[1]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0xc2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000001fffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000001ffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000001ffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_hu(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op1[2]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op1[1]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op1[0]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x2c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0018001800180018; +- *((unsigned long*)& __m256i_op1[2]) = 0x0018001800180018; +- *((unsigned long*)& __m256i_op1[1]) = 0x0018001800180018; +- *((unsigned long*)& __m256i_op1[0]) = 0x0018001800180018; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3000300030003000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3000300030003000; +- __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_result[3]) = 0x1e9e1e9e1e9e1e9e; +- *((unsigned long*)& __m256i_result[2]) = 0x1e9e1e9e1e9e1e9e; +- 
*((unsigned long*)& __m256i_result[1]) = 0x1e9e1e9e1e9e1e9e; +- *((unsigned long*)& __m256i_result[0]) = 0x1e9e1e9e1e9e1e9e; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_result[2]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_result[1]) = 0x0002000000020000; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000000020000; +- __m256i_out = __lasx_xvbitseti_w(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrml_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbsrl_v(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; +- __m256d_out = __lasx_xvflogb_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickve_d(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001d0000001d; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001d0000001d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001d0000001d; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001d0000001d; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000020000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000004; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x7e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000000; +- __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvsrarni_w_d(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000555500005555; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000555500005555; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000555500005555; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000555500005555; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrph_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x5a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000001000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- long_int_result = 0x0000000000000000; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x0); +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256d_op1[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256d_op1[1]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256d_op1[0]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x01000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00ffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000555500005555; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000555500005555; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000555500005555; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000555500005555; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_result[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_result[1]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_result[0]) = 0x01fe01fe01fe01fe; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256d_result[3]) = 0x437fe01fe01fe020; +- *((unsigned long*)& __m256d_result[2]) = 0x437fe01fe01fe020; +- *((unsigned long*)& __m256d_result[1]) = 0x437fe01fe01fe020; +- *((unsigned long*)& __m256d_result[0]) = 0x437fe01fe01fe020; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x01fe01fe01fe01fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_b(__m128i_op0,0x8); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x45); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0xbf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; +- __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x037fe01f001fe020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x037fe01f001fe020; +- *((unsigned long*)& __m256i_result[3]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[2]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[1]) = 0x2020202020202020; +- *((unsigned long*)& __m256i_result[0]) = 0x2020202020202020; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned 
long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x437fe01fe01fe020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x437fe01fe01fe020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x037fe01f001fe020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x037fe01f001fe020; +- *((unsigned long*)& __m256i_result[3]) = 0x437f201f201f2020; +- *((unsigned long*)& __m256i_result[2]) = 0x037f201f001f2020; +- *((unsigned long*)& __m256i_result[1]) = 0x437f201f201f2020; +- *((unsigned long*)& __m256i_result[0]) = 0x037f201f001f2020; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x437f201f201f2020; +- *((unsigned long*)& __m256i_op1[2]) = 0x037f201f001f2020; +- *((unsigned long*)& __m256i_op1[1]) = 0x437f201f201f2020; +- *((unsigned long*)& __m256i_op1[0]) = 0x037f201f001f2020; +- *((unsigned long*)& __m256i_op2[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x21bb481000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x01bf481000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x21bb481000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x01bf481000000000; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000086fe0000403e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000403e00004040; +- *((unsigned long*)& __m256i_op0[1]) = 0x000086fe0000403e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000403e00004040; +- *((unsigned long*)& __m256i_op1[3]) = 0x000086fe0000403e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000403e00004040; +- *((unsigned long*)& __m256i_op1[1]) = 0x000086fe0000403e; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000403e00004040; +- *((unsigned long*)& __m256i_result[3]) = 0x00001bfa000000f9; +- *((unsigned long*)& __m256i_result[2]) = 0x000000f900004040; +- *((unsigned long*)& __m256i_result[1]) = 0x00001bfa000000f9; +- *((unsigned long*)& __m256i_result[0]) = 0x000000f900004040; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000086fe0000403e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000403e00004040; +- *((unsigned long*)& __m256i_op1[1]) = 
0x000086fe0000403e; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000403e00004040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000437f0000201f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000201f00002020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000437f0000201f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000201f00002020; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00001bfa000000f9; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000f900004040; +- *((unsigned long*)& __m256d_op0[1]) = 0x00001bfa000000f9; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000f900004040; +- *((unsigned long*)& __m256d_result[3]) = 0x60183329ceb52cf0; +- *((unsigned long*)& __m256d_result[2]) = 0x6040392cdaf9b3ff; +- *((unsigned long*)& __m256d_result[1]) = 0x60183329ceb52cf0; +- *((unsigned long*)& __m256d_result[0]) = 0x6040392cdaf9b3ff; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[3]) = 0x21bb481000ff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x01bf481000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x21bb481000ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x01bf481000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xb1b3b1b1b1b7b1b1; +- *((unsigned long*)& __m256i_result[2]) = 0xb1b7b1b1b1b1b1b1; +- *((unsigned long*)& __m256i_result[1]) = 0xb1b3b1b1b1b7b1b1; +- *((unsigned long*)& __m256i_result[0]) = 0xb1b7b1b1b1b1b1b1; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xb7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x5d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x43800000; +- *((int*)& __m128_result[0]) = 0x43800000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op1[3]) = 0x000008e4bfc4eff0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000001ffee10000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000008e4bfc4eff0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000001ffee10000; +- *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d000000000d; +- *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0000060d0d; +- *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d000000000d; +- *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0000060d0d; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x1); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000800000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008; +- __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008; +- __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[1]) = 0xff0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; +- __m256i_out = __lasx_xvreplve0_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op1[3]) = 0xff0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op1[1]) = 0xff0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xff0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256d_op0[2]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256d_op0[1]) = 0xff0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256d_op0[0]) = 0x0d0d0d0d0d0d0d0d; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; +- __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_h(__m128i_op0,14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256d_op0[2]) = 0x6040190d00000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256d_op0[0]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_result[2]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_result[0]) = 0x6040190d00000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- 
*((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x43800000; +- *((int*)& __m128_op0[0]) = 0x43800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_result[0]) = 0xfffdfffdfffdfffd; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffedffedffedffed; +- *((unsigned long*)& __m128i_result[0]) = 0xffedffedffedffed; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800200028; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffdfffdfffdfffd; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_result[1]) = 0xffefffefffefffef; +- *((unsigned long*)& __m128i_result[0]) = 0xffefffefffefffef; +- __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256d_op1[2]) = 0x6040190d00000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256d_op1[0]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_result[1]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_result[0]) = 0xfffdfffdfffdfffd; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x7e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffdfffdfffd; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffdfffcfffdfffc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffdfffcfffdfffc; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_result[3]) = 0x080808000828082f; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080008280820; +- *((unsigned long*)& __m256i_result[1]) = 0x080808000828082f; +- *((unsigned long*)& __m256i_result[0]) = 
0x0808080008280820; +- __m256i_out = __lasx_xvbitrevi_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000400100013; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000400100014; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000400100013; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x080808000828082f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0808080008280820; +- *((unsigned long*)& __m256i_op0[1]) = 0x080808000828082f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0808080008280820; +- *((unsigned long*)& __m256i_op1[3]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op1[2]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op1[0]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00828082f0808080; +- *((unsigned long*)& __m256i_result[2]) = 0xf18181818132feea; +- *((unsigned long*)& __m256i_result[1]) = 0x00828082f0808080; +- *((unsigned long*)& __m256i_result[0]) = 0xf18181818132feea; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x24); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000006040190d; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000006040190d; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000860601934; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000860601934; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800200028; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[0]) 
= 0x6040190d00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000800200027; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_result[3]) = 0x006018000000001a; +- *((unsigned long*)& __m256i_result[2]) = 0x0060401900000000; +- *((unsigned long*)& __m256i_result[1]) = 0x006018000000001a; +- *((unsigned long*)& __m256i_result[0]) = 0x0060401900000000; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000860601934; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000860601934; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000800200028; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffdfffcfffdfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffdfffcfffdfffc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_result[2]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_result[0]) = 0x0a0a0a0a00000000; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0a0a000000000a0a; +- __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op1[2]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op1[0]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_result[2]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_result[0]) = 0x6040190d00000000; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffefffefffefffef; +- *((unsigned long*)& __m128i_op1[0]) = 0xffefffefffefffef; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[2]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op2[3]) = 0x2020080800000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000004044f4f; +- *((unsigned long*)& __m256i_op2[1]) = 0x0ef11ae55a5a6767; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_result[2]) = 0x6040190d20227a78; +- *((unsigned long*)& __m256i_result[1]) = 0x132feeabd2d33b38; +- *((unsigned 
long*)& __m256i_result[0]) = 0x6040190d00000000; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000400100013; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000400100014; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000400100013; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_op1[3]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_op1[2]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_op1[0]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000020200000202; +- *((unsigned long*)& __m256i_result[2]) = 0x4100004141410000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000020200000000; +- *((unsigned long*)& __m256i_result[0]) = 0x4100004141410000; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000860601934; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000860601934; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000800200028; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000006040190d; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000006040190d; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000006040190c; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff9fbfe6f3; +- *((unsigned long*)& __m256i_result[1]) = 0x000000006040190c; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff9fbfe6f3; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000008; +- *((int*)& __m256_op0[6]) = 0x60601934; +- *((int*)& __m256_op0[5]) = 0x00000008; +- *((int*)& __m256_op0[4]) = 
0x00200028; +- *((int*)& __m256_op0[3]) = 0x00000008; +- *((int*)& __m256_op0[2]) = 0x60601934; +- *((int*)& __m256_op0[1]) = 0x00000008; +- *((int*)& __m256_op0[0]) = 0x00200028; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfdiv_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffefffefffefffef; +- *((unsigned long*)& __m128i_op1[0]) = 0xffefffefffefffef; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0004001000100004; +- *((unsigned long*)& __m256i_result[2]) = 0x0004000400100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0004001000100004; +- *((unsigned long*)& __m256i_result[0]) = 0x0004000400100010; +- __m256i_out = __lasx_xvclz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0a0a000000000a0a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0a0a0a0a00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x006018000000001a; +- *((unsigned long*)& __m256i_op0[2]) = 0x0060401900000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x006018000000001a; +- *((unsigned long*)& __m256i_op0[0]) = 0x0060401900000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000006170; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000006170; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000006170; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000006170; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000030b8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000030b8; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0004000f00100003; +- *((unsigned long*)& __m256i_op0[2]) = 0x000400030010000f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0004000f00100003; +- *((unsigned long*)& __m256i_op0[0]) = 0x000400030010000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_d(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffeffff; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000800000008000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000008000; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000040000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000040000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000040000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000040000000000; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003; +- *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003; +- *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffbfffcffeffff0; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffbfffcffeffff0; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003; +- *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003; +- *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0400100004001000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0400100004001000; +- __m256i_out = __lasx_xvssrani_hu_w(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000f0000000f; +- __m128i_out = __lsx_vclz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[2]) = 0x6040190d20227a78; +- *((unsigned long*)& __m256i_op0[1]) = 0x132feeabd2d33b38; +- *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0004000f00100003; +- *((unsigned long*)& __m256i_op1[2]) = 0x000400030010000f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0004000f00100003; +- *((unsigned long*)& __m256i_op1[0]) = 0x000400030010000f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000c0300000019a; +- *((unsigned long*)& __m256i_result[2]) = 0x0c08032100004044; +- *((unsigned long*)& __m256i_result[1]) = 0x0000265ffa5a6767; +- *((unsigned long*)& __m256i_result[0]) = 0x0c08032100000000; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_result[3]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_result[2]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_result[1]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_result[0]) = 0x132feea900000000; +- __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op0[2]) = 0x132feea900000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f18181818; +- *((unsigned long*)& __m256i_op0[0]) = 0x132feea900000000; +- *((unsigned long*)& __m256d_result[3]) = 0x4393a0a5bc606060; +- *((unsigned long*)& __m256d_result[2]) = 0x43b32feea9000000; +- *((unsigned long*)& __m256d_result[1]) = 0x4393a0a5bc606060; +- *((unsigned long*)& __m256d_result[0]) = 0x43b32feea9000000; +- __m256d_out = __lasx_xvffint_d_l(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000010000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000800000008000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000800000008000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000800000008000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000008000; +- __m128i_out = __lsx_vadda_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x04e8296f08181818; +- *((unsigned long*)& __m256d_op1[2]) = 0x032feea900000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x04e8296f08181818; +- *((unsigned long*)& __m256d_op1[0]) = 0x032feea900000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x296e000018170000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x296e000018170000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x04e8296f; +- *((int*)& __m256_op0[6]) = 0x18181818; +- *((int*)& __m256_op0[5]) = 0x132feea9; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x04e8296f; +- *((int*)& __m256_op0[2]) = 0x18181818; +- *((int*)& __m256_op0[1]) = 0x132feea9; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x5cbe15f2; +- *((int*)& __m256_result[6]) = 0x53261036; +- *((int*)& __m256_result[5]) = 0x559a674d; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x5cbe15f2; +- *((int*)& __m256_result[2]) = 0x53261036; +- *((int*)& __m256_result[1]) = 0x559a674d; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000080; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftintrph_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op2[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6018000000000cd1; +- *((unsigned long*)& __m256i_op0[2]) = 0x6040190d20227a78; +- *((unsigned long*)& __m256i_op0[1]) = 0x132feeabd2d33b38; +- *((unsigned long*)& __m256i_op0[0]) = 0x6040190d00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x9fe7fffffffff32e; +- *((unsigned long*)& __m256i_result[2]) = 0x6040190ddfdd8587; +- *((unsigned long*)& __m256i_result[1]) = 0xecd011542d2cc4c7; +- *((unsigned long*)& __m256i_result[0]) = 0x6040190dffffffff; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000030b8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000030b8; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8; +- *((unsigned long*)& __m256i_result[3]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[2]) = 0x00020002000230ba; +- *((unsigned long*)& __m256i_result[1]) = 0x0002000200020002; +- *((unsigned long*)& __m256i_result[0]) = 0x00020002000230ba; +- __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000030b8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000030b8; +- *((unsigned long*)& __m256i_op1[3]) = 0x9fe7fffffffff32e; +- *((unsigned long*)& __m256i_op1[2]) = 0x6040190ddfdd8587; +- *((unsigned long*)& __m256i_op1[1]) = 0xecd011542d2cc4c7; +- *((unsigned long*)& __m256i_op1[0]) = 0x6040190dffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_w(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000080; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- 
__m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_wu_d(__m256i_op0,__m256i_op1,0x35); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f08181818; +- *((unsigned long*)& __m256i_op0[2]) = 0x032feea900000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f08181818; +- *((unsigned long*)& __m256i_op0[0]) = 0x032feea900000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; +- __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000001; +- 
*((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000001; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0x4393a0a5; +- *((int*)& __m256_op0[6]) = 0xbc606060; +- *((int*)& __m256_op0[5]) = 0x43b32fee; +- *((int*)& __m256_op0[4]) = 0xa9000000; +- *((int*)& __m256_op0[3]) = 0x4393a0a5; +- *((int*)& __m256_op0[2]) = 0xbc606060; +- *((int*)& __m256_op0[1]) = 0x43b32fee; +- *((int*)& __m256_op0[0]) = 0xa9000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000001; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff0000; +- *((unsigned long*)& 
__m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff0000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_w(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256d_op1[3]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256d_op1[2]) = 0x408480007fff0000; +- *((unsigned long*)& __m256d_op1[1]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256d_op1[0]) = 0x408480007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4393a0a5bc606060; +- *((unsigned long*)& __m256i_op0[2]) = 0x43b32feea9000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x4393a0a5bc606060; +- *((unsigned long*)& __m256i_op0[0]) = 0x43b32feea9000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256i_op1[2]) = 0x408480007fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256i_op1[0]) = 0x408480007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x04e8296f3c611818; +- *((unsigned long*)& __m256i_result[2]) = 0x032eafee29010000; +- *((unsigned long*)& __m256i_result[1]) = 0x04e8296f3c611818; +- *((unsigned long*)& __m256i_result[0]) = 0x032eafee29010000; +- __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256d_op0[2]) = 0x408480007fff0000; +- *((unsigned long*)& __m256d_op0[1]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256d_op0[0]) = 0x408480007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x4084800000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x4084800000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_d(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256d_op0[2]) = 0x408480007fff0000; +- *((unsigned long*)& __m256d_op0[1]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256d_op0[0]) = 0x408480007fff0000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x04e8296f3c611818; +- *((unsigned long*)& __m256i_op0[2]) = 0x032eafee29010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x04e8296f3c611818; +- *((unsigned long*)& __m256i_op0[0]) = 0x032eafee29010000; +- *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff00000000ffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000ffffff; +- __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_op0[2]) = 0x00e9a80014ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_op0[0]) = 0x00e9a80014ff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00b213181dff0607; +- *((unsigned long*)& __m256i_result[2]) = 0x00e9a80114ff0001; +- *((unsigned long*)& __m256i_result[1]) = 0x00b213181dff0607; +- *((unsigned long*)& __m256i_result[0]) = 0x00e9a80114ff0001; +- __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_op1[2]) = 0x00e9a80014ff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_op1[0]) = 0x00e9a80014ff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_result[2]) = 0x00e9a80014ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_result[0]) = 0x00e9a80014ff0000; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m256d_op0[2]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m256d_op0[1]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m256d_op0[0]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m256i_result[1]) = 0xffff0001ffff0001; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0001ffff0001; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256i_op0[2]) = 0x408480007fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256i_op0[0]) = 0x408480007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_result[2]) = 0x0003000300030000; +- *((unsigned long*)& __m256i_result[1]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_result[0]) = 0x0003000300030000; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00010001; +- *((int*)& __m128_op0[2]) = 0x00010001; +- *((int*)& __m128_op0[1]) = 0x00010001; +- *((int*)& __m128_op0[0]) = 0x00010001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000080; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_wu(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00010001; +- *((int*)& __m128_op0[2]) = 0x00010001; +- *((int*)& __m128_op0[1]) = 0x00010001; +- *((int*)& __m128_op0[0]) = 0x00010001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0020010101610000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0061200000610000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0020010101610000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0061200000610000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000101000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00011fff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000101000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00011fff0000ffff; +- __m256i_out = __lasx_xvaddwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_op0[2]) = 0x00e9a80014ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_op0[0]) = 0x00e9a80014ff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00000000ffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00000000ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x3b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256i_op0[2]) = 0x408480007fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3eab77367fff4848; +- *((unsigned long*)& __m256i_op0[0]) = 0x408480007fff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000700000008; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000700000008; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00ff00ff; +- *((int*)& __m256_op0[6]) = 0x00ff00ff; +- *((int*)& __m256_op0[5]) = 0x00ff00ff; +- *((int*)& __m256_op0[4]) = 0x00ff00ff; +- *((int*)& __m256_op0[3]) = 0x00ff00ff; +- *((int*)& __m256_op0[2]) = 0x00ff00ff; +- *((int*)& __m256_op0[1]) = 0x00ff00ff; +- *((int*)& __m256_op0[0]) = 0x00ff00ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvftintrp_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00000000ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00000000ffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff00000000ffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000ffffff; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vneg_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[3]) = 0x7f7fff7f7f7fff7f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7fff7f7f7fff7f; +- *((unsigned long*)& __m256i_result[1]) = 0x7f7fff7f7f7fff7f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7fff7f7f7fff7f; +- __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[3]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_result[2]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_result[1]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_result[0]) = 0x000408080c111414; +- __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00200101; +- *((int*)& __m256_op0[6]) = 0x01610000; +- *((int*)& __m256_op0[5]) = 0x00612000; +- *((int*)& __m256_op0[4]) = 0x00610000; +- *((int*)& __m256_op0[3]) = 0x00200101; +- *((int*)& __m256_op0[2]) = 0x01610000; +- *((int*)& __m256_op0[1]) = 0x00612000; +- *((int*)& __m256_op0[0]) = 0x00610000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_result[2]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_result[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_result[0]) = 0x3f8000003f800000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000408080c111414; +- *((unsigned long*)& __m256d_op0[2]) = 0x000408080c111414; +- *((unsigned long*)& __m256d_op0[1]) = 0x000408080c111414; +- *((unsigned long*)& __m256d_op0[0]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x24); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x3e8000003e800000; +- *((unsigned long*)& __m256i_result[2]) = 0x3e8000003e800000; +- *((unsigned long*)& __m256i_result[1]) = 0x3e8000003e800000; +- *((unsigned long*)& __m256i_result[0]) = 0x3e8000003e800000; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00b2fe28e4420609; +- *((unsigned long*)& __m256i_op0[2]) = 0x028da7fe15020000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00b2fe28e4420609; +- *((unsigned long*)& __m256i_op0[0]) = 0x028da7fe15020000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000598; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000598; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0x6d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000598; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000598; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000002cc0000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000002cc0000; +- __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x31); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xb6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xf039b8c0; +- *((int*)& __m128_op0[2]) = 0xc61e81ef; +- *((int*)& __m128_op0[1]) = 0x6db7da53; +- *((int*)& __m128_op0[0]) = 0xfbd2e34b; +- *((unsigned long*)& __m128i_result[1]) = 0x80000000ffffd860; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x02020102; +- *((int*)& __m256_op0[6]) = 0x02020102; +- *((int*)& __m256_op0[5]) = 0x02020102; +- *((int*)& __m256_op0[4]) = 0x02020102; +- *((int*)& __m256_op0[3]) = 0x02020102; +- *((int*)& __m256_op0[2]) = 0x02020102; +- *((int*)& __m256_op0[1]) = 0x02020102; +- *((int*)& __m256_op0[0]) = 0x02020102; +- *((int*)& __m256_op1[7]) = 0x3e800000; +- *((int*)& __m256_op1[6]) = 0x3e800000; +- *((int*)& __m256_op1[5]) = 0x3e800000; +- *((int*)& __m256_op1[4]) = 0x3e800000; +- *((int*)& __m256_op1[3]) = 0x3e800000; +- *((int*)& __m256_op1[2]) = 0x3e800000; +- *((int*)& __m256_op1[1]) = 0x3e800000; +- *((int*)& __m256_op1[0]) = 0x3e800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000598; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000598; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- 
__m128i_out = __lsx_vsat_du(__m128i_op0,0x34); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000001c000000134; +- *((unsigned long*)& __m256d_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256d_op0[1]) = 0x000001c000000134; +- *((unsigned long*)& __m256d_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256d_op1[3]) = 0x000001c000000134; +- *((unsigned long*)& __m256d_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256d_op1[1]) = 0x000001c000000134; +- *((unsigned long*)& __m256d_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256d_result[3]) = 0x0000038000000268; +- *((unsigned long*)& __m256d_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256d_result[1]) = 0x0000038000000268; +- *((unsigned long*)& __m256d_result[0]) = 0x7fff7fff7fff7fff; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0x80000000; +- *((int*)& __m128_op0[2]) = 0xffffd860; +- *((int*)& __m128_op0[1]) = 0x7fffffff; +- *((int*)& __m128_op0[0]) = 0x80000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; +- __m256i_out = __lasx_xvadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[3]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_result[2]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_result[1]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000200010002; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x80000000ffffd860; +- *((unsigned long*)& __m128i_op1[0]) = 
0x7fffffff80000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff80000000; +- __m128i_out = __lsx_vaddwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000038000000268; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000038000000268; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001010101; +- __m256i_out = __lasx_xvssrarni_bu_h(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x02020102; +- *((int*)& __m256_op0[6]) = 0x02020102; +- *((int*)& __m256_op0[5]) = 0x02020102; +- *((int*)& __m256_op0[4]) = 0x02020102; +- *((int*)& __m256_op0[3]) = 0x02020102; +- *((int*)& __m256_op0[2]) = 0x02020102; +- *((int*)& __m256_op0[1]) = 0x02020102; +- *((int*)& __m256_op0[0]) = 0x02020102; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffe400000707; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000af100001455; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffe400000707; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000af100001455; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010101; 
+- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_op1[2]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_op1[1]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_op1[0]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m256d_op1[3]) = 0x000408080c111414; +- *((unsigned long*)& __m256d_op1[2]) = 0x000408080c111414; +- *((unsigned long*)& __m256d_op1[1]) = 0x000408080c111414; +- *((unsigned long*)& __m256d_op1[0]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000038000000268; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000038000000268; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_op0[2]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_op0[1]) = 0x000408080c111414; +- *((unsigned long*)& __m256i_op0[0]) = 
0x000408080c111414; +- *((unsigned long*)& __m256i_op1[3]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff7fff; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_result[2]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_result[1]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_result[0]) = 0x7fe363637fe36363; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x63); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000038000000268; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000038000000268; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x000001200000011a; +- *((unsigned long*)& __m256i_result[2]) = 0x2040204020402040; +- *((unsigned long*)& __m256i_result[1]) = 0x000001200000011a; +- *((unsigned long*)& __m256i_result[0]) = 0x2040204020402040; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010001; +- __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000009e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000009e; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001ffff0101ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e; +- *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b; +- *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e; +- *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff81001dff9dff9e; +- *((unsigned long*)& __m256i_result[2]) = 0xff81001dff9d003b; +- *((unsigned long*)& __m256i_result[1]) = 0xff81001dff9dff9e; +- *((unsigned long*)& __m256i_result[0]) = 0xff81001dff9d003b; +- __m256i_out = __lasx_xvssub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e; +- *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b; +- *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e; +- *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001010002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010002; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff81001dff9dff9e; +- *((unsigned long*)& __m256i_op0[2]) = 0xff81001dff9d003b; +- *((unsigned long*)& __m256i_op0[1]) = 0xff81001dff9dff9e; +- *((unsigned long*)& __m256i_op0[0]) = 0xff81001dff9d003b; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0002000200010002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0002000200010002; +- *((unsigned long*)& __m256i_result[3]) = 0x7f1d7f7f7f1d7f3b; +- *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[1]) = 0x7f1d7f7f7f1d7f3b; +- *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x7f1d7f7f; +- *((int*)& __m256_op0[6]) = 0x7f1d7f3b; +- *((int*)& __m256_op0[5]) = 0x02020102; +- *((int*)& __m256_op0[4]) = 0x02020102; +- *((int*)& __m256_op0[3]) = 0x7f1d7f7f; +- *((int*)& __m256_op0[2]) = 0x7f1d7f3b; +- *((int*)& __m256_op0[1]) = 0x02020102; +- *((int*)& __m256_op0[0]) = 0x02020102; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000010; +- *((int*)& __m256_op0[6]) = 0x00000010; +- *((int*)& __m256_op0[5]) = 0x00000010; +- *((int*)& __m256_op0[4]) = 0x00000010; +- *((int*)& __m256_op0[3]) = 0x00000010; +- *((int*)& __m256_op0[2]) = 0x00000010; +- *((int*)& __m256_op0[1]) = 0x00000010; +- *((int*)& __m256_op0[0]) = 0x00000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffff00; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000001; +- *((int*)& __m128_op1[2]) = 0x00000001; +- *((int*)& __m128_op1[1]) = 0x00000001; +- *((int*)& __m128_op1[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- 
__m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001ffff0101ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0103fefd0303fefd; +- *((unsigned long*)& __m128i_result[0]) = 0x0103fefd0103fefd; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffefff00001000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffefff00001000; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffff00; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x000103030102ffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff00ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000010102ffff; +- __m128i_out = __lsx_vpickev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128d_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_result[3]) = 0x40f23232330df9c8; +- *((unsigned long*)& __m256i_result[2]) = 0x40f2323240f23232; +- *((unsigned long*)& __m256i_result[1]) = 0x40f23232330df9c8; +- *((unsigned long*)& __m256i_result[0]) = 0x40f2323240f23232; +- 
__m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010100000000; +- __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000101010015; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffed00010001; +- __m128i_out = __lsx_vmaddwev_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0001000101010001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; +- __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x02020102; +- *((int*)& __m256_op1[6]) = 0x02020102; +- *((int*)& __m256_op1[5]) = 0x02020102; +- *((int*)& __m256_op1[4]) = 0x02020102; +- *((int*)& __m256_op1[3]) = 0x02020102; +- *((int*)& __m256_op1[2]) = 0x02020102; +- *((int*)& __m256_op1[1]) = 0x02020102; +- *((int*)& __m256_op1[0]) = 0x02020102; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) 
= 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000201220001011c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000201220001011c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000201220001011c; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000201220001011c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvsadd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000014; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000001400000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128d_result[0]) = 0x1f81e3779b97f4a8; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned 
long*)& __m256d_op0[3]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256d_op0[2]) = 0x7fe363637fe36364; +- *((unsigned long*)& __m256d_op0[1]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256d_op0[0]) = 0x7fe363637fe36364; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fe363637fe36364; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fe363637fe36364; +- *((unsigned long*)& __m256i_result[3]) = 0x00001ff8d8d8c000; +- *((unsigned long*)& __m256i_result[2]) = 0x00001ff8d8d90000; +- *((unsigned long*)& __m256i_result[1]) = 0x00001ff8d8d8c000; +- *((unsigned long*)& __m256i_result[0]) = 0x00001ff8d8d90000; +- __m256i_out = __lasx_xvsllwil_d_w(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1f81e3779b97f4a8; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff02000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1f81e3779b97f4a8; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00e30064001a008f; +- *((unsigned long*)& __m256i_result[2]) = 0x00e3006300e30063; +- *((unsigned long*)& __m256i_result[1]) = 0x00e30064001a008f; +- *((unsigned long*)& __m256i_result[0]) = 0x00e3006300e30063; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffff02000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x1f81e3779b97f4a8; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff02000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000014; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000014; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xc3110000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0xc3110000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- int_op0 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff02000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000008; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000008; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000008; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000008; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000008; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000008; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000008; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000008; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000001; +- *((int*)& __m256_op2[4]) = 0x00000001; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000001; +- *((int*)& __m256_op2[0]) = 0x00000001; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x80000001; +- *((int*)& __m256_result[4]) = 0x80000001; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x80000001; +- *((int*)& __m256_result[0]) = 0x80000001; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_b(__m256i_op0,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op1[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[3]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0101000101010001; +- __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fe36364661af18f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fe363637fe36363; +- *((unsigned long*)& __m256i_result[3]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0101000101010001; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0200000202000002; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256d_op0[2]) = 
0x0000000000000008; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000400010004; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000400010004; +- __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000014; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0200000202000002; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0200000202000002; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101000101010001; +- *((unsigned long*)& __m256d_op1[2]) = 0x0101000101010001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0101000101010001; +- *((unsigned long*)& __m256d_op1[0]) = 0x0101000101010001; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0101000101010001; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0101000101010001; +- __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00001ff8d8d8c000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00001ff8d8d90000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00001ff8d8d8c000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x00001ff8d8d90000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0200000202000002; +- *((unsigned long*)& __m256i_result[3]) = 0x00001ff800000000; +- *((unsigned long*)& __m256i_result[2]) = 0xd8d8c00000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00001ff800000000; +- *((unsigned long*)& __m256i_result[0]) = 0xd8d8c00000000000; +- __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0101000101010001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0101000101010001; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0101000101010001; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0101000101010001; +- __m256d_out = __lasx_xvfmax_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_result[0]) = 0x0202010202020102; +- __m256i_out = __lasx_xvreplve0_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[2]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[1]) = 0x0202010202020102; +- *((unsigned long*)& __m256i_op0[0]) = 0x0202010202020102; +- *((unsigned long*)& __m256d_result[3]) = 0x4380100810101008; +- *((unsigned long*)& __m256d_result[2]) = 0x4380100810101008; +- *((unsigned long*)& __m256d_result[1]) = 0x4380100810101008; +- *((unsigned long*)& __m256d_result[0]) = 0x4380100810101008; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_h(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00001ff800000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xd8d8c00000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00001ff800000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xd8d8c00000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00001ff8; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xd8d8c000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00001ff8; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xd8d8c000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x02020102; +- *((int*)& __m256_op1[6]) = 0x02020102; +- *((int*)& __m256_op1[5]) = 0x02020102; +- *((int*)& __m256_op1[4]) = 0x02020102; +- *((int*)& __m256_op1[3]) = 0x02020102; +- *((int*)& __m256_op1[2]) = 0x02020102; +- *((int*)& __m256_op1[1]) = 0x02020102; +- *((int*)& __m256_op1[0]) = 0x02020102; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0007fff800000000; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000014; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001400000014; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101000101010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000000010000; +- __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0014001400140000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000554; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0014001400140000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001400000014; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001400000000; +- __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00001ff8d8d8c000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00001ff8d8d90000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00001ff8d8d8c000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00001ff8d8d90000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00001ef8d8d8c000; +- *((unsigned long*)& __m256i_result[2]) = 0x00001ef8d8d80000; +- *((unsigned long*)& __m256i_result[1]) = 0x00001ef8d8d8c000; +- *((unsigned long*)& __m256i_result[0]) = 0x00001ef8d8d80000; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfff2fff2fff2fff2; +- *((unsigned long*)& __m256i_result[2]) = 0xfff2fff2fff2fff2; +- *((unsigned long*)& __m256i_result[1]) = 0xfff2fff2fff2fff2; +- *((unsigned long*)& __m256i_result[0]) = 0xfff2fff2fff2fff2; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001400000014; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000053a4f452; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001400000014; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001400000000; +- __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001400000014; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001400000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff9000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc000400000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0007001400000014; +- *((unsigned long*)& __m128i_result[0]) = 0x0004001000000000; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000400010004; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000400010004; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000400010004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000400010004; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000053a4f452; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000053a; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000400010004; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000400010004; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000e0001000e; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000e0001000e; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000e0001000e; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000e0001000e; +- __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000053a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007001400000014; +- *((unsigned long*)& __m128i_op0[0]) = 0x0004001000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000000053a; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000700140000053a; +- *((unsigned long*)& __m128i_result[0]) = 
0x0004001000000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000e0001000e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000e0001000e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000e0001000e; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000e0001000e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000053a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff9000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffc000400000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffc000400000000; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000014; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000014; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xfffc0004; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vflogb_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x0000000e; +- *((int*)& __m256_op1[6]) = 0x0000000e; +- *((int*)& __m256_op1[5]) = 0x0000000e; +- *((int*)& __m256_op1[4]) = 0x0000000e; +- *((int*)& __m256_op1[3]) = 0x0000000e; +- *((int*)& __m256_op1[2]) = 0x0000000e; +- *((int*)& __m256_op1[1]) = 0x0000000e; +- *((int*)& __m256_op1[0]) = 0x0000000e; +- *((int*)& __m256_result[7]) = 0x0000000e; +- *((int*)& __m256_result[6]) = 0x0000000e; +- *((int*)& __m256_result[5]) = 0x0000000e; +- *((int*)& __m256_result[4]) = 0x0000000e; +- *((int*)& __m256_result[3]) = 0x0000000e; +- *((int*)& __m256_result[2]) = 0x0000000e; +- *((int*)& __m256_result[1]) = 0x0000000e; +- *((int*)& __m256_result[0]) = 0x0000000e; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrai_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000010000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000001010101; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a; +- *((unsigned long*)& __m128i_op2[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0080000700000014; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffbffda; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffc000400000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00003fff00010000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve_w(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0080000700000014; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffbffda; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00003fff00010000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00123fff00120012; +- *((unsigned long*)& __m128i_result[0]) = 0x0012001200120012; +- __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00123fff00120012; +- *((unsigned long*)& __m128i_op0[0]) = 0x0012001200120012; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000005003a; +- *((unsigned long*)& __m128i_result[1]) = 0x00123fff00120012; +- *((unsigned long*)& __m128i_result[0]) = 0x001200120017004c; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0007000700070007; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_result[2]) = 0xf2f2f2f2f2f2f2f2; +- *((unsigned long*)& __m256i_result[1]) = 0xf3f3f3f3f3f3f3f3; +- *((unsigned long*)& __m256i_result[0]) = 0xf2f2f2f2f2f2f2f2; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000008000000080; +- *((unsigned long*)& __m128i_result[0]) = 0x0000008000000080; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0xaa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00123fff00120012; +- *((unsigned long*)& __m128i_op0[0]) = 0x0012001200120012; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x00003fff00010000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1200091212121212; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrar_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000e0000000d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000e0000000d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff03ffffff07; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff03ffffff07; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1200091212121212; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000e0000000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000f0001000f; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000f0001000d; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000f0001000f; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000f0001000d; +- __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000008000000080; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000008000000080; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0001000f0001000f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000f0001000d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000f0001000f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000f0001000d; +- *((unsigned long*)& __m256i_result[3]) = 0x000000010000000f; +- *((unsigned long*)& __m256i_result[2]) = 0x000000010000000f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000010000000f; +- *((unsigned long*)& __m256i_result[0]) = 0x000000010000000d; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x55); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000008000000080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000008000000080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x51); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000008000000080; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000008000000080; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0x26); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x80000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x80000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskgez_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwev_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000e000e000e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000e0000000d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000e000e000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000e0000000d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000e000e000e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000e0000000d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000e000e000e; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000e0000000d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000dfffffff1; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000cfffffff3; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000dfffffff1; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000cfffffff3; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000dfffffff1; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000cfffffff3; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000dfffffff1; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000cfffffff3; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000000d; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000dfffffff1; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000cfffffff3; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000dfffffff1; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000cfffffff3; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x17); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvdiv_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- long_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_d(__m256i_op0,long_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vmaxi_wu(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00003f3f00003f3f; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00003f3f00003f3f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff0000000f; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x56); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_h(__m128i_op0,-15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_w(__m128i_op0,-5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; 
+- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaxi_d(__m128i_op0,-5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff0000000d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000000e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff00ffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff00ffffffffff; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000e; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00008000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00008000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0400000004000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_result[1]) = 0x0400000004000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000400; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000008000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_w(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vssrarni_du_q(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; +- __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- long_int_result = 0x0000000000000000; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x2); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000000d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fffe0000000c; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fffe0000000c; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000003ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000003ff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_b(__m256i_op0,15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000800000000000; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff8000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff8000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff8000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0008000000000000; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_h(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,-7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_h_w(__m128i_op0,__m128i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_bu(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,-1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xf9f9f9f900000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xf9f9f9f900000002; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[3]) = 0xfff8fffffff8ffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfff8fffffff8ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xfff8fffffff8ffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfff8fffffff8ffff; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_b_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xfff8fffffff8ffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xfff8fffffff8ffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xfff8fffffff8ffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xfff8fffffff8ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00f9f9f900000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00f9f9f900000002; +- *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000faf3f3f2; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00f9f9f900000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00f9f9f900000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x007cfcfd80000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x007cfcfd80000001; +- __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0607ffff0383; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0607ffffc0c1; +- *((unsigned long*)& __m256i_result[1]) = 0xffff0607ffff0383; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0607ffffc0c1; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000001000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000001000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007cfcfd80000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007cfcfd80000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000001000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0607ffff0607; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffdbbbcf; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffb8579f; +- *((unsigned long*)& __m256i_result[1]) = 
0x00000000ffdbbbcf; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffb8579f; +- __m256i_out = __lasx_xvmaddwev_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_result[2]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_result[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_result[0]) = 0xfffcfffcfffcfffc; +- __m256i_out = __lasx_xvsubi_hu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x5); +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000ffdbbbcf; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffb8579f; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000ffdbbbcf; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffb8579f; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffcfffc; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffcfffc; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffcfffc; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffcfffc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000003fff; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffdbbbcf; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffb8579f; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffdbbbcf; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffb8579f; +- *((unsigned long*)& __m256i_op2[3]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op2[2]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op2[1]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op2[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op2[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0008000000000000; +- __m256i_out = __lasx_xvfrstpi_h(__m256i_op0,__m256i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; +- __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001555; +- *((unsigned long*)& __m256i_op0[2]) = 0x000015554001c003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000001555; +- *((unsigned long*)& __m256i_op0[0]) = 0x000015554001c003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000304; +- *((unsigned long*)& __m256i_result[2]) = 0x0000030401010202; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000304; +- *((unsigned long*)& __m256i_result[0]) = 0x0000030401010202; +- __m256i_out = __lasx_xvpcnt_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0x00030005; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 
0x00030005; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; +- __m128i_out = __lsx_vmini_w(__m128i_op0,8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffc001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000c000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffc001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000c000; +- __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x6d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op0[1]) 
= 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffcfffcfffcfffc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x0000ffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x0000ffff; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x0000ffff; +- __m128_out = __lsx_vfmin_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; +- __m128i_out = __lsx_vrotr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3e25c8317394dae6; +- *((unsigned long*)& __m128i_op0[0]) = 0xcda585aebbb2836a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xcda585aebbb2836a; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xcda585aebbb2836a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; +- *((unsigned long*)& __m128i_result[1]) = 0xd78cfd70b5f65d76; +- *((unsigned long*)& __m128i_result[0]) = 0x5779108fdedda7e4; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3e25c8317394dae6; +- *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0xfefeff00fefeff00; +- *((unsigned long*)& __m128i_result[0]) = 0xfefeff00fefeff00; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_result[3]) = 0xffff000300030000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffc000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff000300030000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffc000; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xcda585aebbb2836a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000080808080; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffc4cdfd16; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fffcfffcfffc; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffdbbbcf; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffb8579f; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffdbbbcf; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xffb8579f; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0xfff8579f; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0xfff8579f; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskgez_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128d_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; +- *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xd78cfd70b5f65d77; +- *((unsigned long*)& __m128i_result[0]) = 0x5779108fdedda7e5; +- __m128i_out = __lsx_vbitset_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x5b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000003fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000003fff; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffdbbbcf; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffb8579f; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffdbbbcf; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffb8579f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff00bb; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0057; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff00bb; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0057; +- __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0101010101010101; +- __m128i_out = __lsx_vclo_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefeff00fefeff00; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefeff00fefeff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[1]) = 0x00007e7e00007e7e; +- *((unsigned long*)& __m128i_result[0]) = 0x00007e7e00007e7e; +- __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fff8579f; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xd78cfd70b5f65d77; +- *((unsigned long*)& __m128d_op1[0]) = 0x5779108fdedda7e5; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080800008; +- __m128i_out = 
__lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x80808080; +- *((int*)& __m128_op0[2]) = 0x80808080; +- *((int*)& __m128_op0[1]) = 0x80808080; +- *((int*)& __m128_op0[0]) = 0x80800008; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x80000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000b3a6000067da; +- *((unsigned long*)& __m128i_op1[0]) = 0x00004e420000c26a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x7a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000f9f9f9f9; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000faf3f3f2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff00bb; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000ff0057; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff00bb; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000ff0057; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fffa003e; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fffb009c; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fffa003e; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fffb009c; +- __m256i_out = __lasx_xvhsubw_hu_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff8579f; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff0007a861; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0007a861; +- __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000b3a6000067da; +- *((unsigned long*)& __m128i_op0[0]) = 0x00004e420000c26a; +- *((unsigned long*)& __m128i_op1[1]) = 0xd78cfd70b5f65d76; +- *((unsigned long*)& __m128i_op1[0]) = 0x5779108fdedda7e4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000b3a6000067da; +- *((unsigned long*)& __m128i_result[0]) = 0x5779108f0000c26a; +- __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; +- *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vslei_w(__m128i_op0,-16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((int*)& __m128_op0[3]) = 0x0000b3a6; +- *((int*)& __m128_op0[2]) = 0x000067da; +- *((int*)& __m128_op0[1]) = 0x00004e42; +- *((int*)& __m128_op0[0]) = 0x0000c26a; +- *((unsigned long*)& __m128d_result[1]) = 0x379674c000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x3789f68000000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffff0007a861; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0007a861; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd78cfd70b5f65d76; +- *((unsigned long*)& __m128i_op0[0]) = 0x5779108fdedda7e4; +- *((unsigned long*)& __m128i_result[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x379674c000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3789f68000000000; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0x8080808080800008; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x975ca6046e2e4889; +- *((unsigned long*)& __m128i_op0[0]) = 0x1748c4f9ed1a5870; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080ffffffff8080; +- *((unsigned 
long*)& __m128i_op1[0]) = 0x00008080ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xff80ffffffffff80; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff80ffffffff; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0x0007a861; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0x0007a861; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000003; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x379674c000000000; +- __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x379674c000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3789f68000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfefeff00fefeff00; +- *((unsigned long*)& __m128i_op1[0]) = 0xfefeff00fefeff00; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00c0000000800000; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsrl_v(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff80ffffffffff80; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff80ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffffe; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffff0007a861; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff0007a861; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x379674c000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x379674c000000000; +- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xff80ffffffffff80; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000ff80ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff0007a861; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff0007a861; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x975ca6046e2e4889; +- *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1748c4f9ed1a5870; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x6a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ff960001005b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffa500010003; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0020000000000000; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x2b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000003; +- *((int*)& __m256_op1[5]) = 0x00000000; 
+- *((int*)& __m256_op1[4]) = 0x00000003; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000003; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000003; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_b(__m256i_op0,-15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffee00ba; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffee00ba; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffee; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffee; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) 
= 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978; +- *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002001000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000008000020000; +- __m128i_out = __lsx_vbitrev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff80ffffffffff80; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff80ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978; +- *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0800010001ff8000; +- __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffee00ba; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffee00ba; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00fffff500ba; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00fffff500ba; +- __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0x0007a861; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x0007a861; +- *((int*)& __m256_op1[7]) = 0x80008000; +- *((int*)& __m256_op1[6]) = 0x80008000; +- *((int*)& __m256_op1[5]) = 0x80008000; +- *((int*)& __m256_op1[4]) = 0xfff98000; +- *((int*)& __m256_op1[3]) = 0x80008000; +- 
*((int*)& __m256_op1[2]) = 0x80008000; +- *((int*)& __m256_op1[1]) = 0x80008000; +- *((int*)& __m256_op1[0]) = 0xfff98000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffee00ba; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffee00ba; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xefefefefefee00aa; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xefefefefefee00aa; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0800010001ff8000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x1748c4f9ed1a5870; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1748c4f9ed1a5870; +- __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x1748c4f9ed1a5870; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_b(__m128i_op0,12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x80008000fff98000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80008000fff98000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x21); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_op1[2]) = 0x80008000fff98000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000800080008000; +- *((unsigned long*)& __m256i_op1[0]) = 0x80008000fff98000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 
0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x000000ff; +- *((int*)& __m256_op1[6]) = 0x000000ff; +- *((int*)& __m256_op1[5]) = 0x000000ff; +- *((int*)& __m256_op1[4]) = 0x000000ff; +- *((int*)& __m256_op1[3]) = 0x000000ff; +- *((int*)& __m256_op1[2]) = 0x000000ff; +- *((int*)& __m256_op1[1]) = 0x000000ff; +- *((int*)& __m256_op1[0]) = 0x000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6a5d5b056f2f4978; +- *((unsigned long*)& __m128i_op1[0]) = 0x17483c07141b5971; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xd4bade5e2e902836; +- __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0017004800c400f9; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ed001a00580070; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x800b7fe38062007b; +- *((unsigned long*)& __m128i_result[0]) = 0x0076800d802c0037; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fffa003e; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fffb009c; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fffa003e; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fffb009c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvsrlri_d(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffee; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffee; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffff0000; +- *((int*)& __m256_op1[4]) = 0xffff0000; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffff0000; +- *((int*)& __m256_op1[0]) = 0xffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6a5d5b056f2f4978; +- *((unsigned long*)& __m128i_op0[0]) = 0x17483c07141b5971; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xd4bade5e2e902836; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x345002920f3017d6; +- __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001; +- *((unsigned long*)& 
__m256i_result[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; +- __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff; +- __m256i_out = __lasx_xvsll_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x345002920f3017d6; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; +- __m256i_out = __lasx_xvreplve0_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op1[3]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op1[2]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_op1[0]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m256i_result[3]) = 0x40fe00fe00fe00fe; +- *((unsigned long*)& __m256i_result[2]) = 0x40fe00fe00fe00fe; +- *((unsigned long*)& __m256i_result[1]) = 0x40fe00fe00fe00fe; +- *((unsigned 
long*)& __m256i_result[0]) = 0x40fe00fe00fe00fe; +- __m256i_out = __lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff; +- __m256i_out = __lasx_xvmax_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x2e9028362e902836; +- *((unsigned long*)& __m128i_op1[0]) = 0x2e9028362e902836; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x345002920f3017d6; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff7fffffff7; +- __m128i_out = __lsx_vmini_w(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000002; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000002; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x34500292; +- *((int*)& __m128_op1[0]) = 0x0f3017d6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff80007fff0000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000008000; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256d_op1[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256d_op1[2]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256d_op1[0]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00ff00ff; +- *((int*)& __m256_op0[6]) = 0x00ff00ff; +- *((int*)& __m256_op0[5]) = 0x00ff00ff; +- *((int*)& __m256_op0[4]) = 0x00ff00ff; +- *((int*)& __m256_op0[3]) = 0x00ff00ff; +- *((int*)& __m256_op0[2]) = 0x00ff00ff; +- *((int*)& __m256_op0[1]) = 0x00ff00ff; +- *((int*)& __m256_op0[0]) = 0x00ff00ff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffff7fffffff7; +- *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcdcfcfcfcdc; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff00000000ffff; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[3]) = 0x80fe80ff80fe00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff80ff; +- *((unsigned long*)& __m256i_result[1]) = 0x80fe80ff80fe00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff80ff; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x67eb85afb2ebb000; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 
0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x5252adadadadadad; +- *((unsigned long*)& __m128i_op1[0]) = 0xadad52525252adad; +- *((unsigned long*)& __m128i_result[1]) = 0x0000adad0000adad; +- *((unsigned long*)& __m128i_result[0]) = 0x000052520000adad; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000adad0000adad; +- *((unsigned long*)& __m128i_op1[0]) = 0x000052520000adad; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x1748c4f9ed1a5870; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0xca); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff7cffd6ffc700b0; +- __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000080ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000080ff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x08000000000000f8; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x08000000000000f8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x800000ff800000ff; +- __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000002; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0080000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0080000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff7cffd6ffc700b0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x008300290038ff50; +- __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0080000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0080000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x08000000000000f8; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x08000000000000f8; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0200000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x2000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0200000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x2000000000000000; +- __m256i_out = __lasx_xvssrarni_wu_d(__m256i_op0,__m256i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x7fff8000; +- *((int*)& __m256_op1[6]) = 0x7fff0000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00008000; +- *((int*)& __m256_op1[3]) = 0x7fff8000; +- *((int*)& __m256_op1[2]) = 0x7fff0000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00008000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00830029; +- *((int*)& __m128_op0[0]) = 
0x0038ff50; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000800080008000; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffffe; +- *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000000010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vssrlrni_hu_w(__m128i_op0,__m128i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op2[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_op2[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff800000ff; +- *((unsigned long*)& 
__m256i_result[2]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff800000ff; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0200000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x2000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0200000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x2000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000020000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x800000ff800000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff7fffffff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff7fffffff7fff; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x4000c08000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x00000080c000c080; +- *((unsigned long*)& __m256i_result[1]) = 0x4000c08000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x00000080c000c080; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffe00000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffe00000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000ff800000ff; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_result[3]) = 0xfefee00000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfefee00000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000014155445; +- *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d9f5d800; +- *((unsigned long*)& __m128i_result[0]) = 0xe4c23ffb002a3a22; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfefee00000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfefee00000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0xfefee00000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfefee00000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffc0007ffe0002; +- *((unsigned long*)& __m256i_result[2]) = 0x8000400000018002; +- *((unsigned long*)& __m256i_result[1]) = 0xffffc0007ffe0002; +- *((unsigned long*)& __m256i_result[0]) = 0x8000400000018002; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[2]) = 0x8100810081008100; +- *((unsigned long*)& __m256i_result[1]) = 0x0100010001000100; +- *((unsigned long*)& __m256i_result[0]) = 0x8100810081008100; +- __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff800000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[2]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[0]) = 0x800080ff800080ff; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_op1[1]) = 0x33f5c2d7d9f5d800; +- *((unsigned long*)& __m128i_op1[0]) = 0xe4c23ffb002a3a22; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4000c08000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000080c000c080; +- *((unsigned long*)& __m256i_op0[1]) = 0x4000c08000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000080c000c080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0010001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op1[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000; +- *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; +- __m128i_out = __lsx_vsigncov_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[3]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000400000003fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000400000003fff; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_result[3]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_result[2]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_result[1]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_result[0]) = 0x800080ff800080ff; +- __m256i_out = __lasx_xvreplve0_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcdcfcfcfcdc; +- __m128i_out = __lsx_vmini_d(__m128i_op0,3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85af0000b000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000; +- *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x4000c08000000080; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000080c000c080; +- *((unsigned long*)& __m256i_op1[1]) = 0x4000c08000000080; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000080c000c080; +- *((unsigned long*)& __m256i_result[3]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080ff0080; +- *((unsigned long*)& __m256i_result[1]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080ff0080; +- __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4000c08000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000080c000c080; +- *((unsigned long*)& __m256i_op0[1]) = 0x4000c08000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000080c000c080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000004000; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x31); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x67eb85af0000b000; +- *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc07f8000c07f8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc07f8000c07f8000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fff01fe0; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fff01fe0; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x2a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001; +- *((unsigned long*)& 
__m256i_op0[1]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[3]) = 0xff01fffe00000001; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[1]) = 0xff01fffe00000001; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080ff0080; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080ff0080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff000000000080; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff000000000080; +- *((unsigned long*)& __m256d_result[3]) = 0x416fe00000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x4060000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x416fe00000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x4060000000000000; +- __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0x67157b5100005000; +- *((unsigned long*)& __m128i_result[0]) = 0x387c7e0a133f2000; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080ff0080; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080ff0080; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x8000400080ffc080; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0x680485c8b304b019; +- *((unsigned long*)& __m128i_result[0]) = 0xc89d7f0fed582019; +- __m128i_out = __lsx_vaddi_hu(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff01fffe00000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op0[1]) = 0xff01fffe00000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x800080ff800080ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128d_op0[0]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0010001000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1000000010001000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_hu_w(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000002000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000002000; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; +- *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000003ddc5dac; +- __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& 
__m128_op0[3]) = 0x00000000;
+- *((int*)& __m128_op0[2]) = 0x00000000;
+- *((int*)& __m128_op0[1]) = 0x00000000;
+- *((int*)& __m128_op0[0]) = 0x3ddc5dac;
+- *((int*)& __m128_op1[3]) = 0xffffffff;
+- *((int*)& __m128_op1[2]) = 0xffffffff;
+- *((int*)& __m128_op1[1]) = 0x00000000;
+- *((int*)& __m128_op1[0]) = 0x00000000;
+- *((int*)& __m128_op2[3]) = 0x00000000;
+- *((int*)& __m128_op2[2]) = 0x00000000;
+- *((int*)& __m128_op2[1]) = 0x00000000;
+- *((int*)& __m128_op2[0]) = 0x00000000;
+- *((int*)& __m128_result[3]) = 0xffffffff;
+- *((int*)& __m128_result[2]) = 0xffffffff;
+- *((int*)& __m128_result[1]) = 0x00000000;
+- *((int*)& __m128_result[0]) = 0x00000000;
+- __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2);
+- ASSERTEQ_32(__LINE__, __m128_result, __m128_out);
+-
+- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac;
+- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_result[0]) = 0x0000000001030103;
+- __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x6);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
+- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffc606ec5;
+- *((unsigned long*)& __m128i_op1[0]) = 0x0000000014155445;
+- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
+- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
+- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x76);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m256d_op0[3]) = 0x0000200000000000;
+- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000004000;
+- *((unsigned long*)& __m256d_op0[1]) = 0x0000200000000000;
+- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000004000;
+- *((unsigned long*)& __m256d_op1[3]) = 0x3fffbfff80000000;
+- *((unsigned long*)& __m256d_op1[2]) = 0x00004000007f8000;
+- *((unsigned long*)& __m256d_op1[1]) = 0x3fffbfff80000000;
+- *((unsigned long*)& __m256d_op1[0]) = 0x00004000007f8000;
+- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
+- __m256i_out = __lasx_xvfcmp_saf_d(__m256d_op0,__m256d_op1);
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
+-
+- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff;
+- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000;
+- *((unsigned long*)& __m128d_op1[1]) = 0x67157b5100005000;
+- *((unsigned long*)& __m128d_op1[0]) = 0x387c7e0a133f2000;
+- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
+- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op0[2]) = 0x8000800080008000;
+- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op0[0]) = 0x8000800080008000;
+- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op1[2]) = 0x8000800080010000;
+- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op1[0]) = 0x8000800080010000;
+- *((unsigned long*)& __m256i_result[3]) = 0x8000800080008000;
+- *((unsigned long*)& __m256i_result[2]) = 0x8000800080010000;
+- *((unsigned long*)& __m256i_result[1]) = 0x8000800080008000;
+- *((unsigned long*)& __m256i_result[0]) = 0x8000800080010000;
+- __m256i_out = __lasx_xvpickev_d(__m256i_op0,__m256i_op1);
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
+-
+- *((int*)& __m128_op0[3]) = 0xffffffff;
+- *((int*)& __m128_op0[2]) = 0xfc606ec5;
+- *((int*)& __m128_op0[1]) = 0x00000000;
+- *((int*)& __m128_op0[0]) = 0x14155445;
+- *((int*)& __m128_op1[3]) = 0x00000000;
+- *((int*)& __m128_op1[2]) = 0x00000000;
+- *((int*)& __m128_op1[1]) = 0x00000000;
+- *((int*)& __m128_op1[0]) = 0x01030103;
+- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
+- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
+- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m256d_op0[3]) = 0x3fffbfff80000000;
+- *((unsigned long*)& __m256d_op0[2]) = 0x00004000007f8000;
+- *((unsigned long*)& __m256d_op0[1]) = 0x3fffbfff80000000;
+- *((unsigned long*)& __m256d_op0[0]) = 0x00004000007f8000;
+- *((unsigned long*)& __m256i_result[3]) = 0x4000000000000000;
+- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[1]) = 0x4000000000000000;
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
+- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0);
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
+-
+- *((unsigned long*)& __m128i_op0[1]) = 0x680485c8b304b019;
+- *((unsigned long*)& __m128i_op0[0]) = 0xc89d7f0fed582019;
+- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac;
+- *((unsigned long*)& __m128i_op2[1]) = 0x67157b5100005000;
+- *((unsigned long*)& __m128i_op2[0]) = 0x387c7e0a133f2000;
+- *((unsigned long*)& __m128i_result[1]) = 0x680485c8b304b019;
+- *((unsigned long*)& __m128i_result[0]) = 0xc89d7f0ff90da019;
+- __m128i_out = __lsx_vmaddwev_w_h(__m128i_op0,__m128i_op1,__m128i_op2);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m128i_op0[1]) = 0x0000a95afc60a5c5;
+- *((unsigned long*)& __m128i_op0[0]) = 0x0000b6e414157f84;
+- *((unsigned long*)& __m128i_result[1]) = 0x0000204264602444;
+- *((unsigned long*)& __m128i_result[0]) = 0x0000266404046604;
+- __m128i_out = __lsx_vandi_b(__m128i_op0,0x66);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256d_op0[2]) = 0x8000800080008000;
+- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256d_op0[0]) = 0x8000800080008000;
+- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000;
+- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000;
+- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0);
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
+-
+- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac;
+- *((unsigned long*)& __m128i_op1[1]) = 0x67157b5100005000;
+- *((unsigned long*)& __m128i_op1[0]) = 0x387c7e0a133f2000;
+- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0;
+- __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m128i_op0[1]) = 0x67157b5100005000;
+- *((unsigned long*)& __m128i_op0[0]) = 0x387c7e0a133f2000;
+- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac;
+- *((unsigned long*)& __m128i_result[1]) = 0x67157b5100005000;
+- *((unsigned long*)& __m128i_result[0]) = 0x387c7e0a511b7dac;
+- __m128i_out = __lsx_vsadd_du(__m128i_op0,__m128i_op1);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m128i_op0[1]) = 0x680485c8b304b019;
+- *((unsigned long*)& __m128i_op0[0]) = 0xc89d7f0ff90da019;
+- *((unsigned long*)& __m128i_op1[1]) = 0x680485c8b304b019;
+- *((unsigned long*)& __m128i_op1[0]) = 0xc89d7f0ff90da019;
+- *((unsigned long*)& __m128i_result[1]) = 0x00680486ffffffda;
+- *((unsigned long*)& __m128i_result[0]) = 0xffff913bfffffffd;
+- __m128i_out = __lsx_vsrar_w(__m128i_op0,__m128i_op1);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op0[2]) = 0x007f010000000100;
+- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op0[0]) = 0x007f010000000100;
+- *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000;
+- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000;
+- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001;
+- *((unsigned long*)& __m256i_result[2]) = 0x007f010100000101;
+- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001;
+- *((unsigned long*)& __m256i_result[0]) = 0x007f010100000101;
+- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1);
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
+-
+- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac;
+- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac;
+- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
+- __m128i_out = __lsx_vabsd_du(__m128i_op0,__m128i_op1);
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
+-
+- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000;
+- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000;
+- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op1[2]) = 0x007f010000000100;
+- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_op1[0]) = 0x007f010000000100;
+- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000;
+- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1);
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out);
+-
+- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001;
+- *((unsigned long*)& __m256i_op0[2]) = 0x007f010100000101;
+- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001;
+- *((unsigned long*)& __m256i_op0[0]) = 0x007f010100000101;
+- *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000;
+- *((unsigned long*)&
__m256i_op1[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0008000000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0008000000000010; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x04000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x04000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x04000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x04000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00680486ffffffda; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff913bfffffffd; +- *((unsigned long*)& __m128i_op1[1]) = 0x00680486ffffffda; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff913bfffffffd; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x000000003ddc5dac; +- *((unsigned long*)& __m128i_result[1]) = 0x00680486ffffffda; +- *((unsigned long*)& __m128i_result[0]) = 0xffff913bb9951901; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0020006000200060; +- __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000400080ffc080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff80ff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff80ff; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x67eb85af; +- *((int*)& __m128_op0[2]) = 0xb2ebb000; +- *((int*)& __m128_op0[1]) = 
0xc8847ef6; +- *((int*)& __m128_op0[0]) = 0xed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00680486; +- *((int*)& __m128_op0[2]) = 0xffffffda; +- *((int*)& __m128_op0[1]) = 0xffff913b; +- *((int*)& __m128_op0[0]) = 0xb9951901; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x01030103; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00200060; +- *((int*)& __m128_op2[0]) = 0x00200060; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0xffffffda; +- *((int*)& __m128_result[1]) = 0xffff913b; +- *((int*)& __m128_result[0]) = 0x001fed4d; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00080000; +- *((int*)& __m256_op0[4]) = 0x00000010; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00080000; +- *((int*)& __m256_op0[0]) = 0x00000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x7f010000; +- *((int*)& __m256_op0[5]) = 0x00010000; +- *((int*)& __m256_op0[4]) = 0x00007f7f; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x7f010000; +- *((int*)& __m256_op0[1]) = 0x00010000; +- *((int*)& __m256_op0[0]) = 0x00007f7f; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00680486ffffffda; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff913bb9951901; +- *((unsigned long*)& __m128i_op1[1]) = 0x67157b5100005000; +- *((unsigned long*)& __m128i_op1[0]) = 0x387c7e0a133f2000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000003; +- *((unsigned long*)& __m128i_result[0]) = 0x0c0f000a070f0204; +- __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x478b478b38031779; +- *((unsigned long*)& __m128i_op0[0]) = 0x6b769e690fa1e119; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001030103; +- *((unsigned long*)& __m128i_result[1]) = 0x0047004700380017; +- *((unsigned long*)& __m128i_result[0]) = 0x006bff9e0010ffe2; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; +- *((unsigned long*)& __m128i_op1[1]) = 0x478b478b38031779; +- *((unsigned long*)& __m128i_op1[0]) = 0x6b769e690fa1e119; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0; +- __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000003ddc5dac; +- *((unsigned long*)& __m128i_result[1]) = 0x67ebb2ebc884ed3f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ddc; +- __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000103; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000200000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_w_d(__m256i_op0,__m256i_op1,0x39); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000003ddc5dac; +- long_int_result = 0x000000003ddc5dac; +- long_int_out = __lsx_vpickve2gr_d(__m128i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001030103; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6b75948a91407a42; +- *((unsigned long*)& __m128i_op0[0]) = 0x0b5471b633e54fde; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000004870ba0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000004870ba0; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000004870ba0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x3f80000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x4000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x3f80000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x4efffe00; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x47000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x4efffe00; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x47000000; +- __m256_out = __lasx_xvffint_s_w(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000007fff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000017fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000017fff; +- __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xfffffffc; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xfffffffc; +- *((int*)& __m128_op1[3]) = 0x00000001; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000103; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcvt_h_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000017fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000017fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x04870ba0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000004870ba0; +- *((unsigned long*)& __m128i_op1[1]) = 0x478b478b38031779; +- *((unsigned long*)& __m128i_op1[0]) = 0x6b769e690fa1e119; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fe98c2a0; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x80ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff00ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x80ff00ff00ff00ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff8000fffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00017fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff8000fffe; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001fffe00017fff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000007f00fe; +- *((unsigned long*)& __m256i_result[2]) = 0x000000fe0000007f; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000007f00fe; +- *((unsigned long*)& __m256i_result[0]) = 0x000000fe0000007f; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000103; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffc; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001000000010; +- __m128i_out = __lsx_vssrlrn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x3a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128d_op1[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000103; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000100000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000103; +- __m128d_out = __lsx_vfmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000010; +- *((unsigned long*)& __m128i_op2[1]) = 0x8000000100000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x8000000000000103; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010300000103; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010300000000; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000047000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000047000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; +- __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe; +- *((unsigned long*)& __m128i_result[0]) = 0xe4423f7b769f8ffe; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000010000ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000010000ff00; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffc; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; +- *((unsigned long*)& __m128i_op0[0]) = 0xe4423f7b769f8ffe; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fe96fe95; +- *((unsigned long*)& __m256i_op0[2]) = 0x6afc01000001ff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fe96fe95; +- *((unsigned long*)& __m256i_op0[0]) = 0x6afc01000001ff00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000010000ff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000010000ff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x7e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsat_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x33f5c2d7d975d7fe; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff010000ff01; +- __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff010000ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000047000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000047000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000956a00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000956a00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x007fffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xb500000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x007fffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xb500000000000000; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x29); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000956a; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000956a; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xb500000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xb500000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x007fffffffff9569; +- *((unsigned long*)& __m256i_result[2]) = 0xb50000004efffe00; +- *((unsigned long*)& __m256i_result[1]) = 0x007fffffffff9569; +- *((unsigned long*)& __m256i_result[0]) = 0xb50000004efffe00; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000956a; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000956a; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000000000956a; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000000000956a; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000004efffe00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000057348fe3; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000057348fe3; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- long_int_result = 0x000000000000ffff; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x0); +- *((int*)& __m256_op0[7]) = 0x0000ff01; +- *((int*)& __m256_op0[6]) = 0x00ff0000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& 
__m256_op0[4]) = 0x0000ff01; +- *((int*)& __m256_op0[3]) = 0x0000ff01; +- *((int*)& __m256_op0[2]) = 0x00ff0000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000ff01; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x67eb85b0b2ebb001; +- *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x67eb85b0; +- *((int*)& __m128_op0[2]) = 0xb2ebb001; +- *((int*)& __m128_op0[1]) = 0xc8847ef6; +- *((int*)& __m128_op0[0]) = 0xed3f2000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; +- unsigned_int_result = 0x0000000000100010; +- unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x38); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256d_op1[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; +- __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xa87745dbd93e4ea1; +- *((unsigned long*)& __m128i_op1[0]) = 0xaa49601e26d39860; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001f0000001f; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000101000001010; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000101000001010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000020; +- *((int*)& __m128_op1[2]) = 0x00000020; +- *((int*)& __m128_op1[1]) = 0x0000001f; +- *((int*)& __m128_op1[0]) = 0x0000001f; +- *((int*)& __m128_result[3]) = 0x00000020; +- *((int*)& __m128_result[2]) = 0x00000020; +- *((int*)& __m128_result[1]) = 0x0000001f; +- *((int*)& __m128_result[0]) = 0x0000001f; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x33eac9fdca42f660; +- *((unsigned long*)& __m128i_op0[0]) = 0xaa472d26fe867091; +- *((unsigned long*)& __m128i_op1[1]) = 0x33eac9fdca42f660; +- *((unsigned long*)& __m128i_op1[0]) = 0xaa472d26fe867091; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff5; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff5; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff5; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff5; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xc0c0c0c0c0c0c0c0; +- *((unsigned long*)& __m128i_result[0]) = 0xc0c0c0c0c0c0c0c0; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_result[0]) = 0x000000008000001e; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffff5; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffff5; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffff5; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffff5; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; +- __m256i_out = __lasx_xvadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000008000001e; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffe1; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff7fffffe2; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000001f; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000008000001e; +- *((unsigned long*)& __m128i_result[1]) = 0x000000200000001b; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000000; +- __m128i_out = __lsx_vclz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0xd48acbfe13102acf; +- *((unsigned long*)& __m128i_result[0]) = 
0xf4af70d0c4000000; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x67eb8590b2ebafe1; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001f00000000; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00100010; +- *((int*)& __m256_op1[5]) = 0x00100010; +- *((int*)& __m256_op1[4]) = 0x00100010; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00100010; +- *((int*)& __m256_op1[1]) = 0x00100010; +- *((int*)& __m256_op1[0]) = 0x00100010; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000020; +- *((int*)& __m128_op1[2]) = 0x00000020; +- *((int*)& __m128_op1[1]) = 0x0000001f; +- *((int*)& __m128_op1[0]) = 0x0000001f; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000100010; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000200000001b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0xd400c02000002acf; +- *((unsigned long*)& __m128i_op1[0]) = 0xf4000020c4000000; +- *((unsigned long*)& __m128i_result[1]) = 0x6453f5e01d6e5000; +- *((unsigned long*)& __m128i_result[0]) = 0x000fdec000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001f0000001f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4000000040000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6453f5e01d6e5000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000fdec000000000; +- int_result = 0x000000001d6e5000; +- int_out = 
__lsx_vpickve2gr_w(__m128i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0x801dd5cb0004e058; +- *((unsigned long*)& __m128i_op0[0]) = 0x77eb15638eeb5fc2; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000200000001b; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000004e03d; +- *((unsigned long*)& __m128i_result[0]) = 0x000000008eeb5fc2; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000101000001010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000101000001010; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op2[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op2[0]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000101000001010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000101000001010; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000ff01; +- *((int*)& __m256_op0[6]) = 0x00ff0000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000ff01; 
+- *((int*)& __m256_op0[3]) = 0x0000ff01; +- *((int*)& __m256_op0[2]) = 0x00ff0000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000ff01; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000808; +- *((int*)& __m256_op1[4]) = 0x00000808; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000808; +- *((int*)& __m256_op1[0]) = 0x00000808; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000001ffe00000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000001ffe00000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff010ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff010ff0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000201; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000201; +- __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffac0a000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x801d5de0000559e0; +- *((unsigned long*)& __m128i_op1[0]) = 0x77eb86788eebafe1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000ffac00000000; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffac0a000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ac00000000; +- __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff010ff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff010ff0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x801d5de0000559e0; +- *((unsigned long*)& __m128i_op0[0]) = 0x77eb86788eebaf00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x2e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffac0a000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000200000001b; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000ffac0a000000; +- __m128i_out = __lsx_vmaddwod_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fff000000000; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff010ff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff010ff0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ff0100ff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff01; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x6f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000ac00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffac0a000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffac0a000000; +- __m128i_out = __lsx_vabsd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffac0a000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000085af0000b000; +- *((unsigned long*)& __m128i_result[0]) = 0x00017ea200002000; +- __m128i_out = __lsx_vaddwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000fff000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000001; +- *((int*)& __m256_op1[6]) = 0xffe00000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000001; +- *((int*)& __m256_op1[2]) = 0xffe00000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& 
__m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,-2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000085af0000b000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00017ea200002000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlni_w_d(__m128i_op0,__m128i_op1,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x67eb85afb2ebb000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; +- *((unsigned long*)& __m128i_result[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128i_result[0]) = 0x377b810912c0e000; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- 
*((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_result[0]) = 0xfffefffefffefffe; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00ff00ff; +- *((int*)& __m256_op0[6]) = 0x00ff00ff; +- *((int*)& __m256_op0[5]) = 0x00ff00ff; +- *((int*)& __m256_op0[4]) = 0x00ff00ff; +- *((int*)& __m256_op0[3]) = 0x00ff00ff; +- *((int*)& __m256_op0[2]) = 0x00ff00ff; +- *((int*)& __m256_op0[1]) = 0x00ff00ff; +- *((int*)& __m256_op0[0]) = 0x00ff00ff; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffefffe00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffefffe00000000; +- __m128i_out = __lsx_vpackod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000085af0000b000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00017ea200002000; +- *((unsigned 
long*)& __m128i_result[1]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff7; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128d_op0[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128d_op1[1]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffffffffffffff7; +- *((unsigned long*)& __m128d_result[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128d_result[0]) = 0x377b810912c0e000; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128i_op0[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128i_op1[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128i_op1[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128i_result[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128i_result[0]) = 0x377b810912c0e000; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffefffe00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x98147a4f4d144fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x377b810812c0dfff; +- *((unsigned long*)& __m128i_result[1]) = 0x98137a4d4d144fff; +- *((unsigned long*)& __m128i_result[0]) = 0x377a810612c0dfff; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffefffe00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128i_op1[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128i_op2[1]) = 0x5a57bacbd7e39680; +- *((unsigned long*)& __m128i_op2[0]) = 0x6bae051ffed76001; +- *((unsigned long*)& __m128i_result[1]) = 0xf3eb458161080000; +- *((unsigned long*)& __m128i_result[0]) = 0xffe9454286c0e000; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5a57bacbd7e39680; +- *((unsigned long*)& __m128i_op0[0]) = 0x6bae051ffed76001; +- *((unsigned long*)& __m128i_op1[1]) = 0xf3e6586b60d7b152; +- *((unsigned long*)& __m128i_op1[0]) = 0xf7077b934ac0e000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4e3e133738bb47d2; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000005; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128i_op0[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4e3e133738bb47d2; +- *((unsigned long*)& __m128i_result[1]) = 0xff98007a004d0050; +- *((unsigned long*)& __m128i_result[0]) = 0xfff9ff4a0057000e; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128i_op0[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128i_op1[1]) = 
0x98147a504d145000; +- *((unsigned long*)& __m128i_op1[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000000000000; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128i_op1[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffff00; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffff00; +- __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080805; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080805; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xf3e6586b; +- 
*((int*)& __m128_op0[2]) = 0x60d7b152; +- *((int*)& __m128_op0[1]) = 0xf7077b93; +- *((int*)& __m128_op0[0]) = 0x4ac0e000; +- *((int*)& __m128_op1[3]) = 0x1498507a; +- *((int*)& __m128_op1[2]) = 0x144d0050; +- *((int*)& __m128_op1[1]) = 0x7b370981; +- *((int*)& __m128_op1[0]) = 0xc01200e0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_result[1]) = 0x000001fffdfffdff; +- *((unsigned long*)& __m128i_result[0]) = 0x000001fffdfffdff; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x43); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0080000000800000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0080000000800000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0080000000800000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_h(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x4e3e1337; +- *((int*)& __m128_op0[0]) = 0x38bb47d2; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0x41e80000; +- *((int*)& __m128_result[0]) = 0xc1600000; +- __m128_out = 
__lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000001fffdfffdff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000001fffdfffdff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010101010101; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010101010101; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128d_op1[1]) = 0x000700000004fdff; +- *((unsigned long*)& __m128d_op1[0]) = 0x000300000000fdff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0080000000800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; +- __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffff7f8c; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x98147a504d145000; +- *((unsigned long*)& __m128d_op1[0]) = 0x377b810912c0e000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0808080808080805; +- *((unsigned long*)& __m128i_op0[0]) = 0x0808080808080805; +- *((unsigned long*)& __m128i_result[1]) = 0x0020002000200020; +- *((unsigned long*)& __m128i_result[0]) = 0x0020002000200014; +- __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x7ff80000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x7ff80000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x7ff80000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x7ff80000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4e3e133738bb47d2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x9c7c266e71768fa4; +- __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x000700000004fdff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000300000000fdff; +- *((unsigned long*)& __m128i_result[1]) = 0xfff7fffefffa01ff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffbfffefffe01ff; +- __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff6ff4ffff8db8; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffbaf4ffffb805; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfff4ffb800ff0080; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- 
*((int*)& __m128_op0[2]) = 0x00000005; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000005; +- *((int*)& __m128_op1[3]) = 0xfffefffe; +- *((int*)& __m128_op1[2]) = 0xfffefffe; +- *((int*)& __m128_op1[1]) = 0xfffefffe; +- *((int*)& __m128_op1[0]) = 0xfffefffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvrotr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000040; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000040; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xfff7fffe; +- *((int*)& __m128_op0[2]) = 0xfffa01ff; +- *((int*)& __m128_op0[1]) = 0xfffbfffe; +- *((int*)& __m128_op0[0]) = 0xfffe01ff; +- *((int*)& __m128_result[3]) = 0xfff7fffe; +- *((int*)& __m128_result[2]) = 0xfffa01ff; +- *((int*)& __m128_result[1]) = 0xfffbfffe; +- *((int*)& __m128_result[0]) = 0xfffe01ff; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op1[1]) = 0x000700000004fdff; +- *((unsigned long*)& __m128i_op1[0]) = 0x000300000000fdff; +- *((unsigned long*)& __m128i_result[1]) = 0x0006fff20003fff8; +- *((unsigned long*)& __m128i_result[0]) = 0x0002fffa00000000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff7fffefffa01ff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffbfffefffe01ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; +- *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; +- *((unsigned long*)& __m128i_result[1]) = 0x0305030203020502; +- *((unsigned long*)& __m128i_result[0]) = 0x0301030203020502; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4e3e13368c17f6e6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; +- __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefe01010101; +- *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefe01010101; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfefefefe01010101; +- *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfefefefe01010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefe01010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefe01010101; +- __m256i_out = __lasx_xvsrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; +- *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; +- *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcfcfcfcfcfd; +- *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcfcfcfcfcfd; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000005; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0305030203020502; +- *((unsigned long*)& __m128i_op0[0]) = 0x0301030203020502; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000003050302; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000003010302; +- __m128i_out = 
__lsx_vaddwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x03050302; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x03010302; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xfefefefe; +- *((int*)& __m256_op0[4]) = 0x01010101; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xfefefefe; +- *((int*)& __m256_op0[0]) = 0x01010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefe3f800000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x40404040; +- *((int*)& 
__m256_op1[6]) = 0x40404040; +- *((int*)& __m256_op1[5]) = 0x40404040; +- *((int*)& __m256_op1[4]) = 0x40404040; +- *((int*)& __m256_op1[3]) = 0x40404040; +- *((int*)& __m256_op1[2]) = 0x40404040; +- *((int*)& __m256_op1[1]) = 0x40404040; +- *((int*)& __m256_op1[0]) = 0x40404040; +- *((int*)& __m256_result[7]) = 0x40404040; +- *((int*)& __m256_result[6]) = 0x40404040; +- *((int*)& __m256_result[5]) = 0x40404040; +- *((int*)& __m256_result[4]) = 0x40404040; +- *((int*)& __m256_result[3]) = 0x40404040; +- *((int*)& __m256_result[2]) = 0x40404040; +- *((int*)& __m256_result[1]) = 0x40404040; +- *((int*)& __m256_result[0]) = 0x40404040; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000071768fa4; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_result[2]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_result[1]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_result[0]) = 0x0404000004040000; +- __m256i_out = __lasx_xvslli_w(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000040; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000040; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x40404040; +- *((int*)& __m256_op2[6]) = 0x40404040; +- *((int*)& __m256_op2[5]) = 0x40404040; +- *((int*)& __m256_op2[4]) = 0x40404040; +- *((int*)& __m256_op2[3]) = 0x40404040; +- *((int*)& __m256_op2[2]) = 0x40404040; +- *((int*)& __m256_op2[1]) = 0x40404040; +- *((int*)& __m256_op2[0]) = 0x40404040; +- *((int*)& __m256_result[7]) = 0xc0404040; +- *((int*)& __m256_result[6]) = 0xc0404040; +- *((int*)& __m256_result[5]) = 0xc0404040; +- *((int*)& __m256_result[4]) = 0xc0404040; +- *((int*)& __m256_result[3]) = 0xc0404040; +- *((int*)& __m256_result[2]) = 0xc0404040; +- *((int*)& __m256_result[1]) = 0xc0404040; +- *((int*)& __m256_result[0]) = 0xc0404040; +- __m256_out = __lasx_xvfmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_int_result = 0x0000000000000000; +- unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x3); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000fe0000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000fe0000000; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; +- *((unsigned long*)& __m128i_op1[0]) = 0xfcfcfcfcfcfc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_result[3]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefeffe0e0e0; +- *((unsigned long*)& __m256i_result[1]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefeffe0e0e0; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0xe0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op1[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1010101010101010; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x3a); 
+- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1010101010101010; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000404; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000404; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00009c7c00007176; +- *((unsigned long*)& __m128d_op1[1]) = 0xfcfcfcfcfcfcfcfd; +- *((unsigned long*)& __m128d_op1[0]) = 0xfcfcfcfcfcfc0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0404000004040000; +- *((unsigned long*)& __m256i_result[3]) = 0x4000400040004000; +- *((unsigned long*)& __m256i_result[2]) = 0x4000400040004000; +- *((unsigned long*)& __m256i_result[1]) = 0x4000400040004000; +- *((unsigned long*)& __m256i_result[0]) = 0x4000400040004000; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000040004000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000040004000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000404; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000404; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000020202000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000020202000; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_h(__m128i_op0,-16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_w(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000404; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000404; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_result[2]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_result[1]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_result[0]) = 0x0404040404040404; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256d_op0[2]) = 0xfefefefeffe0e0e0; +- *((unsigned long*)& __m256d_op0[1]) = 0xe0e0e0e0e0e0e0e0; +- *((unsigned long*)& __m256d_op0[0]) = 0xfefefefeffe0e0e0; +- 
*((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000040004000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000040004000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfefefefe3f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; +- __m256i_out = __lasx_xvssrlrni_h_w(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfcfcfcfcfcfcfcfd; +- *((unsigned long*)& __m128i_op0[0]) = 0xfcfcfcfcfcfc0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00009c7c00007176; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffcfcfcfc; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffcfc6080; +- __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00009c7c00007176; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x9c7c266e3faa293c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_b(__m128i_op0,0xe); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e3faa293c; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& 
__m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00009c7c; +- *((int*)& __m128_op0[0]) = 0x00007176; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xf3040705; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xf3040705; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0xf3040705; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_op1[2]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_op1[1]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_op1[0]) = 0x0404040404040404; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003f800000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003f800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned 
long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7c7c000000007176; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x3e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x40404040; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x40404040; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xfefefefe; +- *((int*)& __m256_op1[4]) = 0x3f800000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xfefefefe; +- *((int*)& __m256_op1[0]) = 0x3f800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000040404040; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_b(__m128i_op0,-12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000f3040705; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000020202000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000020202000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x3d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7c7c000000007176; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000001f1f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 
0x0000000000000100; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffefefefe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000040404040; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfe01fe01fd02fd02; +- *((unsigned long*)& __m256i_result[2]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_result[1]) = 0xfe01fe01fd02fd02; +- *((unsigned long*)& __m256i_result[0]) = 0x000000003fc03fc0; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7c7c9c0000007176; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7c7c9c0000007176; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000001f1f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff000000001f1f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000404; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000404; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7c7c9c0000007176; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x00ff000000001f1f; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7c7c9c0000007176; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfe01fe01fc01fc01; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op1[1]) = 0xfe01fe01fc01fc01; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_result[2]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_result[0]) = 0x000000003fc03bbc; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fd02fd02; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fd02fd02; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0000000000000405; +- *((unsigned long*)& __m256i_result[3]) = 0xfe01fe017e81fd02; +- *((unsigned long*)& __m256i_result[2]) = 0x000000003fc001fe; +- *((unsigned long*)& __m256i_result[1]) = 0xfe01fe017e81fd02; +- *((unsigned long*)& __m256i_result[0]) = 0x000000003fc001fe; +- __m256i_out = __lasx_xvsrl_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000405; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfc01fc0101fe01dd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfc01fc0101fe01dd; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_op0[0]) = 0xc5c53492f25acbf2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000f3040705; +- *((unsigned long*)& __m128i_result[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_result[0]) = 0xc5c534920000c4ed; +- __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x41cfe01dde000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x41cfe01dde000000; +- __m256d_out = __lasx_xvffintl_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xff000000001f1f00; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00009c7c00007176; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fc01fc01; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fc01fc01; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfc01000000003fc0; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfc01000000003fc0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_op0[0]) = 0xc5c53492f25acbf2; +- *((unsigned long*)& __m128i_op1[1]) = 0xff000000001f1f00; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_result[0]) = 0xc5c53492f25acbf2; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x30); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfe01fe01; +- *((int*)& __m256_op0[6]) = 0x7e81fd02; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x3fc001fe; +- *((int*)& __m256_op0[3]) = 0xfe01fe01; +- *((int*)& __m256_op0[2]) = 0x7e81fd02; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x3fc001fe; +- *((int*)& __m256_op1[7]) = 0xfe01fe01; +- *((int*)& __m256_op1[6]) = 0x7e81fd02; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x3fc001fe; +- *((int*)& __m256_op1[3]) = 0xfe01fe01; +- *((int*)& __m256_op1[2]) = 0x7e81fd02; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x3fc001fe; 
+- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfe01fe017e81fd02; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000003fc001fe; +- *((unsigned long*)& __m256d_op0[1]) = 0xfe01fe017e81fd02; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000003fc001fe; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x41cfe01dde000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x41cfe01dde000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x41cfe01dde000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x41cfe01dde000000; +- __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x41cfe01dde000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 
0x41cfe01dde000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000013fc03bbc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000013fc03bbc; +- __m256i_out = __lasx_xvssrln_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000001010100; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000405; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000001010100; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000405; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000001010100; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000405; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000001010100; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000405; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff00000000; +- __m128i_out = __lsx_vslti_h(__m128i_op0,-15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x01010100; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000405; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x01010100; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000405; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x01010100; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000405; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x01010100; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000405; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x01010100; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x00000405; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x01010100; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x00000405; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- 
ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe01fd02fd02; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe01fd02fd02; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3f00c0003f00c000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3f00c0003f00c000; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; +- *((unsigned long*)& __m128i_result[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_result[0]) = 0xc5c534920000c4ed; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffc01fc01; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03bbc; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffe00fe00; +- *((unsigned long*)& __m256i_result[2]) = 0x000000001fe01dde; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffe00fe00; +- *((unsigned long*)& __m256i_result[0]) = 0x000000001fe01dde; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,-2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000001010100; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000405; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000001010100; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000405; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffe00000ffe00000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffe00000ffe00000; +- __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_w(__m256i_op0,__m256i_op1,0xc2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffe00000ffe00000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffe00000ffe00000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000001010100; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000405; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000001010100; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000405; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000600000006; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0xf6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001010100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000405; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001010100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000405; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000800080; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000800080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000202; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xbfd10d0d7b6b6b73; +- *((unsigned long*)& __m128i_op1[0]) = 0xc5c534920000c4ed; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xedededededededed; +- *((unsigned long*)& __m128i_result[0]) = 0xedededededededed; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00800080; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000202; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00800080; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000202; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00800080; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000202; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00800080; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000202; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00009c7c00007176; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000009c007c00; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000071007600; +- __m128i_out = 
__lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000010000000100; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000010000000100; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000010000000100; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x1fa0000000080000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 
0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- *((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000009c007c00; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000071007600; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000009000900; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000009000900; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000009000900; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000009000900; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; +- __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xd454545454545454; +- *((unsigned long*)& __m128i_result[0]) = 0xd454545454545454; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x54); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1f60010000080100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1f60010000080100; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfe01fe010000fd02; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_op0[1]) = 0xfe01fe010000fd02; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_op1[3]) = 0xfe01fe010000fd02; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_op1[1]) = 0xfe01fe010000fd02; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000003fc03fc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007f807f80; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007f807f80; +- __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_d(__m256i_op0,15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000800080; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000800080; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1fa0000000080000; +- __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000007fffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007fffff; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlri_h(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000009000900; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000009000900; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000009000900; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000009000900; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000009000900; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000009000900; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_d(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffc000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffeff000c057c; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffc000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffeff000c057c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000f0f0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000f0f0; +- __m256i_out = __lasx_xvmskltz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000800080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000800080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000202; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000202; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfrint_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0606060606060606; +- *((unsigned long*)& __m128i_result[0]) = 0x0606060606060606; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000f0f0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000f0f0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000f0f0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000f0f0; +- __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000f0f0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000f0f0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000007878; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000007878; +- __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfsub_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1f60010000080100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1f60010000080100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1f60010000080100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1f60010000080100; +- __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000007878; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000007878; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000107878; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000107878; +- __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x80000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xff88ff88; +- *((int*)& __m256_op0[3]) = 0x80000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xff88ff88; +- *((int*)& __m256_op1[7]) = 0xfe01fe01; +- *((int*)& __m256_op1[6]) = 0x0000fd02; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x3fc03fc0; +- *((int*)& __m256_op1[3]) = 0xfe01fe01; +- *((int*)& __m256_op1[2]) = 0x0000fd02; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x3fc03fc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; +- 
*((unsigned long*)& __m256d_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; +- __m256d_out = __lasx_xvfdiv_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[3]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[1]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[0]) = 0x1fa0000000080000; +- __m256i_out = __lasx_xvrepl128vei_d(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256d_op1[2]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256d_op1[1]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256d_op1[0]) = 0x1fa0000000080000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x000000003ddc5dac; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_d(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmina_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256d_op0[3]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0010001000100010; +- *((unsigned long*)& __m256d_op1[2]) = 0x0010001000107878; +- *((unsigned long*)& __m256d_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m256d_op1[0]) = 0x0010001000107878; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00800080; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000202; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00800080; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000202; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0xff88ff88; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0xff88ff88; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0xffc8ff88; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0xffc8ff88; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000020; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x56a09e662ab46b31; +- *((unsigned long*)& __m128d_op1[0]) = 0xb4b8122ef4054bb3; +- *((unsigned long*)& __m128d_result[1]) = 0xd6a09e662ab46b31; +- *((unsigned long*)& __m128d_result[0]) = 0x34b8122ef4054bb3; +- __m128d_out = __lsx_vfsub_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31; +- *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3; +- *((unsigned long*)& __m128i_result[1]) = 0xd6e09e262af46b71; +- *((unsigned long*)& __m128i_result[0]) = 0x34f8126ef4454bf3; +- __m128i_out = __lsx_vbitrevi_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x80000000ffc8ff88; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x80000000ffc8ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001ff91ff100000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001ff91ff100000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x56a09e662ab46b31; +- *((unsigned long*)& __m128i_op1[0]) = 0xb4b8122ef4054bb3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4b47edd10bfab44d; +- __m128i_out = __lsx_vhsubw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001ff91ff100000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001ff91ff100000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000800080; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000800080; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000202; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffffff7fff80; +- *((unsigned long*)& __m256i_result[2]) = 0x0001ff91ff0ffdfe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffffff7fff80; +- *((unsigned long*)& __m256i_result[0]) = 0x0001ff91ff0ffdfe; +- __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f807f80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000007f7f; +- __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000a0008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000a0008; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; 
+- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31; +- *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xeb504f33155a3598; +- *((unsigned long*)& __m128i_result[0]) = 0x1a5c0917fa02a5d9; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrph_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000a0008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000a0008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78; +- __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavg_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f807f80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000a0008; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000a0008; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffff5fff7; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffff5fff7; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vavgr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007f433c78; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c78; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000007f433c79; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007f433c79; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000007f8000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007f8000; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000007f8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000007f8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000029; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000029; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000029; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000029; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000029; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000a0008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000a0008; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007f807f80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007f807f80; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000007f8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000007f8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x7b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_w(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x9c9c9c9c9c9c9c9c; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 
0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000000a0008; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000000a0008; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[1]) = 0x4e4e4e4e00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000900000020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff00; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& 
__m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x477f0000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x477f0000; +- __m256_out = __lasx_xvffint_s_w(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_q(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000f788f788; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000f788f788; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_d(__m256i_op0,14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xbff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xbff0000000000000; +- __m128d_out = __lsx_vffint_d_l(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op0[2]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op0[1]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op0[0]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; 
+- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrneh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000c6c6c6c6; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c6c6c6; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op0[2]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op0[1]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op0[0]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- long_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3131313131313131; +- *((unsigned long*)& __m128i_op0[0]) = 0x3131313131313131; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3131313131313131; +- __m128i_out = __lsx_vextl_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000f788f788; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000f788f788; +- *((unsigned long*)& __m256i_result[3]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvslei_bu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd6a09e662ab46b31; +- *((unsigned long*)& __m128i_op0[0]) = 0x34b8122ef4054bb3; +- *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9b509be72f; +- *((unsigned long*)& __m128i_op1[0]) = 0x3513f2e3a1774d2c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000501ffff0005; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xc6c6c6c6; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xc6c6c6c6; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xc6c6c6c6; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0xc6c6c6c6; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9ca19d509ae734; +- *((unsigned long*)& __m128i_op0[0]) = 0xd1b09480f2123460; +- *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001fffeff98; +- *((unsigned long*)& __m128i_result[0]) = 0x0014ffe4ff76ffc4; +- __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3131313131313131; +- *((unsigned long*)& __m128i_result[1]) = 0x0313100003131000; +- *((unsigned long*)& __m128i_result[0]) = 0x0313100003131000; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000600000006; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000f788f788; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000f788f788; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000f788f788; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000f788f788; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x007f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x007f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f00000000; +- __m256i_out = __lasx_xvavg_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0313100003131000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0313100003131000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffeff98; +- *((unsigned long*)& __m128i_op0[0]) = 0x0014ffe4ff76ffc4; +- *((unsigned long*)& __m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000010; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000501ffff0005; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000600000001; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000001fffeff98; +- *((unsigned long*)& __m128i_op0[0]) = 0x0014ffe4ff76ffc4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3131313131313131; +- *((unsigned long*)& __m128i_result[1]) = 0x000000017fff7fcc; +- *((unsigned long*)& __m128i_result[0]) = 0x18a3188b9854187b; +- __m128i_out = __lsx_vavgr_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000600000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000c6c6c6c6; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000c6c6c6c6; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000c6c7; +- *((unsigned long*)& __m128i_result[0]) = 0x8d8d8d8d8d8cc6c6; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000c6c6c6c6; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c6c6c6c6; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001fffeff98; +- *((unsigned long*)& __m128i_op1[0]) = 0x0014ffe4ff76ffc4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3131313131313131; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_d(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000c6c7; +- *((unsigned long*)& __m128i_op0[0]) = 0x8d8d8d8d8d8cc6c6; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_w_d(__m128i_op0,__m128i_op1,0x3c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x31313131; +- *((int*)& __m128_op0[0]) = 0x31313131; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x31313131; +- *((int*)& __m128_op1[0]) = 0x31313131; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000008; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000008; +- *((int*)& __m128_result[1]) = 0xa2f54a1e; +- *((int*)& __m128_result[0]) = 0xa2f54a1e; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, 
__m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000008; +- *((unsigned long*)& __m128i_op1[0]) = 0xa2f54a1ea2f54a1e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[0]) = 0x00004a1e00004a1e; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000013; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000013; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000013; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000013; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000013; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x00004a1e00004a1e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000100; +- *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_d(__m128i_op0,14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_d(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_b(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x80000000; +- *((int*)& __m128_op0[2]) = 0x80000008; +- *((int*)& __m128_op0[1]) = 0xa2f54a1e; +- *((int*)& __m128_op0[0]) = 0xa2f54a1e; +- *((int*)& __m128_op1[3]) = 0x80000000; +- *((int*)& __m128_op1[2]) = 0x80000008; +- *((int*)& __m128_op1[1]) = 0xa2f54a1e; +- *((int*)& __m128_op1[0]) = 0xa2f54a1e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000013; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000200008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000200000; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x6a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000200008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000200008; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000200000; +- __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffed; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000200008; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffff00ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffff00ffff; +- __m128i_out = __lsx_vslei_b(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000013; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- 
*((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200; +- __m256i_out = __lasx_xvfclass_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff88ff88; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000fffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffffffe; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffff0078ffff0078; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffff0078ffff0078; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- int_result = 0xffffffffffffffff; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x3); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x8d78336c83652b86; +- *((unsigned long*)& __m128i_op1[0]) = 0x39c51f389c0d6112; +- *((unsigned long*)& __m128i_op2[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffff0001ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ff9b0082; +- *((unsigned long*)& __m128i_result[0]) = 0x003a0037fff2fff8; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0001fffe0001fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000201fe01fc; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000201fe01fc; +- __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8d78336c83652b86; +- *((unsigned long*)& __m128i_op0[0]) = 0x39c51f389c0d6112; +- int_result = 0xffffffff9c0d6112; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0x8d78336c83652b86; +- *((unsigned long*)& __m128i_op0[0]) = 0x39c51f389c0d6112; +- *((unsigned long*)& __m128i_result[1]) = 0x00000001ce28f9c0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000004e06b0890; +- __m128i_out = __lsx_vsllwil_du_wu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000001ce28f9c0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000004e06b0890; +- *((unsigned long*)& __m128i_result[1]) = 0xfefefefdbffefdfe; +- *((unsigned long*)& __m128i_result[0]) = 0xfefefeeffef7fefe; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff7300000ca00430; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001a00000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_hu(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0101010240010202; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe00; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7feff; +- *((unsigned long*)& __m128i_result[1]) = 0xfcfcfcffbdfcfffc; +- *((unsigned long*)& __m128i_result[0]) = 0xfcfcfcedfcf5fcfd; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000fffe; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000f0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256i_op2[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_result[2]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_result[1]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_result[0]) = 0xff88ff88ff880000; +- __m256i_out = __lasx_xvshuf_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffe00; +- *((unsigned long*)& 
__m256d_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256d_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsat_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7fefe; +- int_op1 = 0xffffffff9c0d6112; +- *((unsigned long*)& __m128i_result[1]) = 0xbffefdfebffefdfe; +- *((unsigned long*)& __m128i_result[0]) = 0xbffefdfebffefdfe; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ffff88ff88; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefefefdbffefdfe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfefefeeffef7fefe; +- *((unsigned long*)& __m128i_result[1]) = 0xfef7fefebffefdfe; +- *((unsigned long*)& __m128i_result[0]) = 0xfefefefdfefefeef; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x2d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvaddwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001fefc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001fefc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000002; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0007000000050000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003000100010001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001fefc; +- *((unsigned long*)& __m128i_result[1]) = 0x0006000100040001; +- *((unsigned long*)& __m128i_result[0]) = 0x00010002ffff0105; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 
0x0000000000100040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffc0; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fff0ffc0; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffc0; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fff0ffc0; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000040; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffc0; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000fff0ffc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000ffc0; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000fff0ffc0; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff78ffc0; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0001fffe0001fefc; +- *((unsigned long*)& __m128d_op1[1]) = 0x0007000000050000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0003000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ffff88ff88; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_h(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000800000000000; +- __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x2f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0006000100040001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00010002ffff0105; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vslti_w(__m128i_op0,15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0x28); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& 
__m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_op1[2]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_op1[1]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_op1[0]) = 0xff88ff88ff880000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000800000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000800000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000800000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0100010001000101; +- *((unsigned long*)& __m128i_result[0]) = 0x0100010001000101; +- __m128i_out = __lsx_vbitseti_h(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000100040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000100040; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256d_result[3]) = 0x00000000ff890000; +- *((unsigned long*)& __m256d_result[2]) = 0x00000000ff790000; +- *((unsigned long*)& __m256d_result[1]) = 0x00000000ff890000; +- *((unsigned long*)& __m256d_result[0]) = 0x00000000ff790000; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_d(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_wu_hu(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvclz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssrln_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100040; +- unsigned_int_result = 0x0000000000000040; +- unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x6); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff890000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff790000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff890000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff790000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ff790000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[0]) = 0x00000000ff790000; +- __m256i_out = __lasx_xvpackev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000bffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000040001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x6d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001; +- *((unsigned long*)& __m128i_result[1]) = 0x0080000100200001; +- *((unsigned long*)& __m128i_result[0]) = 0x0008000200020002; +- __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000060002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000060002; +- *((unsigned long*)& __m128i_op1[1]) = 0xe4c8b96e2560afe9; +- *((unsigned long*)& __m128i_op1[0]) = 0xc001a1867fffa207; +- *((unsigned long*)& __m128i_result[1]) = 0x0000c0010000a186; +- *((unsigned long*)& __m128i_result[0]) = 0x00067fff0002a207; +- __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- 
*((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000020ff790020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000020ff790020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128d_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffcfffffffc; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffcfffffffc; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffcfffffffc; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffcfffffffc; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0xe4c8b96e2560afe9; +- *((unsigned long*)& __m128i_op0[0]) = 0xc001a1867fffa207; +- *((unsigned long*)& __m128i_op1[1]) = 0xe4c8b96e2560afe9; +- *((unsigned long*)& __m128i_op1[0]) = 0xc001a1867fffa207; +- *((unsigned long*)& __m128i_result[1]) = 0xe2560afe9c001a18; +- *((unsigned long*)& __m128i_result[0]) = 0xe2560afe9c001a18; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x24); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe2560afe9c001a18; +- *((unsigned long*)& __m128i_op0[0]) = 0xe2560afe9c001a18; +- *((unsigned long*)& __m128i_result[1]) = 0x89582bf870006860; +- *((unsigned long*)& __m128i_result[0]) = 0x89582bf870006860; +- __m128i_out = __lsx_vslli_w(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000020ff790020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000020ff790020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000020; +- __m256i_out = __lasx_xvshuf4i_w(__m256i_op0,0xa5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x89582bf870006860; +- *((unsigned long*)& __m128i_op1[0]) = 0x89582bf870006860; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_d(__m128i_op0,__m128i_op1,0x94); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000087; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000087; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xff800000; +- *((int*)& __m256_result[6]) = 0xff800000; +- *((int*)& __m256_result[5]) = 0xc30e0000; +- *((int*)& __m256_result[4]) = 0xff800000; +- *((int*)& __m256_result[3]) = 0xff800000; +- *((int*)& __m256_result[2]) = 0xff800000; +- *((int*)& __m256_result[1]) = 0xc30e0000; +- *((int*)& __m256_result[0]) = 0xff800000; +- __m256_out = __lasx_xvflogb_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000000ff78ffc0; +- 
*((unsigned long*)& __m256i_op2[1]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000c0010000a186; +- *((unsigned long*)& __m128i_op0[0]) = 0x00067fff0002a207; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffff0002; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff0000857a; +- *((unsigned long*)& __m128i_result[0]) = 0x05fafe0101fe000e; +- __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000100080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000100080; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff8900000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff8900000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000c0010000a186; +- *((unsigned long*)& __m128d_op1[0]) = 0x00067fff0002a207; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a; +- *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e; +- unsigned_int_result = 0x000000000000857a; +- unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x4); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000100080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100080; +- *((unsigned long*)& __m256i_result[3]) = 0x001a001a001a009a; +- *((unsigned long*)& __m256i_result[2]) = 0x001a001a002a009a; +- *((unsigned long*)& __m256i_result[1]) = 0x001a001a001a009a; +- *((unsigned long*)& __m256i_result[0]) = 0x001a001a002a009a; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001a001a001a009a; +- *((unsigned long*)& __m256i_op0[2]) = 0x001a001a002a009a; +- *((unsigned long*)& __m256i_op0[1]) = 0x001a001a001a009a; +- *((unsigned long*)& __m256i_op0[0]) = 0x001a001a002a009a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001a000000da; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001a000000da; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001a000000da; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001a000000da; +- __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe2560afe9c001a18; +- *((unsigned long*)& __m128i_op0[0]) = 0xe2560afe9c001a18; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a; +- *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000d82; +- *((unsigned long*)& __m128i_result[0]) = 0x046a09ec009c0000; +- __m128i_out = __lsx_vmulwod_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc30e0000ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc30e0000ff800000; +- *((unsigned long*)& __m256i_result[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_result[2]) = 0xc3030000ff800000; +- *((unsigned long*)& __m256i_result[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_result[0]) = 0xc3030000ff800000; +- __m256i_out = __lasx_xvmini_b(__m256i_op0,3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000d82; +- *((unsigned long*)& __m128i_op0[0]) = 0x046a09ec009c0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x046a09ec009c0000; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffff8900000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffff8900000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,-16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000600007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000008ffffa209; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000011; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000016; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000011; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000016; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000011; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000600007fff; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000008ffffa209; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x046a09ec009c0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_d(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000002000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000002000000000; +- __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_h(__m128i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000600007fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000008ffffa209; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000600007fff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000008ffffa209; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x046a09ec; +- *((int*)& __m128_op0[0]) = 
0x009c0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a; +- *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff7a86; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffe01fff2; +- __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000100080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000100080; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000006d; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000006d; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000010006d; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000002000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000002000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x80000000; +- *((int*)& __m128_result[2]) = 0x80000000; +- *((int*)& __m128_result[1]) = 0x80000000; +- *((int*)& __m128_result[0]) = 0x80000000; +- __m128_out = __lsx_vfnmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x80000000; +- *((int*)& __m128_op0[2]) = 0x80000000; +- *((int*)& __m128_op0[1]) = 0x80000000; +- *((int*)& __m128_op0[0]) = 0x80000000; +- *((int*)& __m128_op1[3]) = 0x000000ff; +- *((int*)& __m128_op1[2]) = 0x0000857a; +- *((int*)& __m128_op1[1]) = 0x05fafe01; +- *((int*)& __m128_op1[0]) = 0x01fe000e; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000000000006d; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000000010006d; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000000000006d; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000000010006d; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000080040; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000080040; +- *((unsigned long*)& __m256d_result[3]) = 0x00000000000000ad; +- *((unsigned long*)& __m256d_result[2]) = 0x00000000001800ad; +- *((unsigned long*)& __m256d_result[1]) = 0x00000000000000ad; +- *((unsigned long*)& __m256d_result[0]) = 0x00000000001800ad; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000006; +- *((int*)& __m128_op1[2]) = 0x00007fff; +- *((int*)& __m128_op1[1]) = 0x00000008; +- *((int*)& __m128_op1[0]) = 0xffffa209; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x0000006d; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0010006d; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x0000006d; +- *((int*)& __m256_op0[1]) 
= 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0010006d; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00080040; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00080040; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00080040; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00080040; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00080040; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x0010006d; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00080040; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x0010006d; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x2b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004000000080; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff88ffc0; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ff78ffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000001ff1; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000001ff1; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x53); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc3030000ff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xc3030000ff800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a; +- *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000ff0000857a; +- *((unsigned long*)& __m128i_op1[0]) = 0x05fafe0101fe000e; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000ff0000857a; +- *((unsigned long*)& __m128i_result[0]) = 0x05fafe0101fe000e; +- __m128i_out = __lsx_vmaddwev_h_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_result[3]) = 0x010101010101016c; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101410128; +- *((unsigned long*)& __m256i_result[1]) = 0x010101010101016c; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101410128; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_op2[3]) = 0xcd636363; +- *((int*)& __m128_op2[2]) = 0xcd636363; +- *((int*)& __m128_op2[1]) = 0xcd636363; +- *((int*)& __m128_op2[0]) = 0xcd636363; +- *((int*)& __m128_result[3]) = 0xcd636363; +- *((int*)& __m128_result[2]) = 0xcd636363; +- *((int*)& __m128_result[1]) = 0xcd636363; +- *((int*)& __m128_result[0]) = 0xcd636363; +- __m128_out = __lsx_vfnmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0xcd636363; +- *((int*)& __m128_op1[2]) = 0xcd636363; +- *((int*)& __m128_op1[1]) = 0xcd636363; +- *((int*)& __m128_op1[0]) = 0xcd636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- long_op1 = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000040; +- __m128i_out = __lsx_vinsgr2vr_d(__m128i_op0,long_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000006d; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000400008; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000006d; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000400008; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000080040; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000008002d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000008002d; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000000000; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000007f0000; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000007f00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000007f00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000007f00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000007f00000000; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff0000857a; +- *((unsigned long*)& __m128i_op0[0]) = 0x05fafe0101fe000e; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010000080040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000080040; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000007f00000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000007f00000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000007f00000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000007f00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x2e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_result[1]) = 0x00cd006300cd0063; +- *((unsigned long*)& __m128i_result[0]) = 0x00cd006300cd0063; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010000080040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010000080040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010000080040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000080040; +- __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000010006d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000800400010006d; +- __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000000010000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000cd630000cd63; +- *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_result[1]) = 0xffffcd63ffffcd63; +- *((unsigned long*)& __m128i_result[0]) = 0xffffd765ffffd765; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffcd63ffffcd63; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffd765ffffd765; +- *((unsigned long*)& __m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; +- *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; +- __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000100080; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000100080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000010000080040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000080040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000010000080040; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fff8ffc0; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fff8ffc0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ff00fff8ffc0; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x2d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1f1f1f1f1f1f1f1f; +- *((unsigned long*)& __m128i_op0[0]) = 0x1f1f1f1f1f1f1f1f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x1f1f1f1f1f1f1f1f; +- *((unsigned long*)& __m128i_op2[0]) = 0x1f1f1f1f1f1f1f1f; +- *((unsigned long*)& __m128i_result[1]) = 0x00081f1f1f1f1f1f; +- *((unsigned long*)& __m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; +- __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_op0[0]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_op1[0]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_result[1]) = 0xf359f359f359f359; +- *((unsigned long*)& __m128i_result[0]) = 0xf359f359f359f359; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0xffff00000000ffff; +- __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x93); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00010000; +- *((int*)& __m128_op0[2]) = 0x00010000; +- *((int*)& __m128_op0[1]) = 0x0000cd63; +- *((int*)& __m128_op0[0]) = 0x0000cd63; +- *((int*)& __m128_op1[3]) = 0xffffcd63; +- *((int*)& __m128_op1[2]) = 0xffffcd63; +- *((int*)& __m128_op1[1]) = 0xffffd765; +- *((int*)& __m128_op1[0]) = 0xffffd765; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000048; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000007d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000048; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000007d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000800000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800000010; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ff40; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ff0100090040; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ff40; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ff0100090040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0001000000010000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000cd630000cd63; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0xffff00000000ffff; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffff00000000ffff; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000800000010; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000800000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000002000000; +- __m256i_out = __lasx_xvsrlrni_d_q(__m256i_op0,__m256i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00081f1f; +- *((int*)& __m128_op0[2]) = 0x1f1f1f1f; +- *((int*)& __m128_op0[1]) = 0x1f1f1f1f; +- *((int*)& __m128_op0[0]) = 0x1f1f1f1f; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000000010000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000cd630000cd63; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000329d0000329d; +- __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xcd636363cd636363; +- *((unsigned 
long*)& __m128i_op1[0]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrar_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ffc0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ffc0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fff80000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000fff80000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fff80000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000fff80000; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ffc0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ffc0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000fff8ffc0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000fff8ffc0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ff00fff8ffc0; +- __m256i_out = __lasx_xvextrins_b(__m256i_op0,__m256i_op1,0x82); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x1c083b1f3b1f3b1f; +- *((unsigned long*)& __m128d_op0[0]) = 0xf244b948a323ab42; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8fff8; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ff00fff8ffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000fff8ff40; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ff0100090040; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000fff8ff40; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ff0100090040; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000b7; +- 
*((unsigned long*)& __m256i_result[0]) = 0xffffffffffefff80; +- __m256i_out = __lasx_xvsub_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_b(__m128i_op0,-12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00010000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00010000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x02000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x02000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000002000000; +- __m256i_out = __lasx_xvextrins_h(__m256i_op0,__m256i_op1,0x43); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op1[3]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc3030000ff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc3030000ff800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x00003cfc0000006f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x00003cfc0000006f; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000800400010006d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0008001c0010001c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0008001c0010001c; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000010; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000010; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& 
__m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf359f359f359f359; +- *((unsigned long*)& __m128i_op0[0]) = 0xf359f359f359f359; +- *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_result[1]) = 0x86dd8341b164f12b; +- *((unsigned long*)& __m128i_result[0]) = 0x9611c3985b3159f5; +- __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0200000002000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0200000002000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000002000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff01fb0408; +- *((unsigned long*)& __m256i_op1[2]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff01fb0408; +- *((unsigned long*)& __m256i_op1[0]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_op2[2]) = 0x00003cfc0000006f; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_op2[0]) = 0x00003cfc0000006f; +- *((unsigned long*)& __m256i_result[3]) = 0x02007f8002000400; +- *((unsigned long*)& __m256i_result[2]) = 0x0000c5dc02005f64; +- *((unsigned long*)& __m256i_result[1]) = 0x02007f8002000400; +- *((unsigned long*)& __m256i_result[0]) = 0x0000c5dc02005f64; +- __m256i_out = __lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000fff8ff40; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ff0100090040; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000fff8ff40; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ff0100090040; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff02; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff02; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_op0[0]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x86dd8341b164f12b; +- *((unsigned long*)& __m128i_op1[0]) = 0x9611c3985b3159f5; +- *((unsigned long*)& __m128i_result[1]) = 0x86dd8341b164f12b; +- *((unsigned long*)& __m128i_result[0]) = 0x9611c3985b3159f5; +- __m128i_out = __lsx_vmin_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000010; +- __m256i_out = __lasx_xvexth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_op1[1]) = 0x86dd8341b164f12b; +- *((unsigned long*)& __m128i_op1[0]) = 0x9611c3985b3159f5; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000035697d4e; +- *((unsigned long*)& __m128i_result[0]) = 0x000000013ecaadf2; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xf359f359f359f359; +- *((unsigned long*)& __m128d_op0[0]) = 0xf359f359f359f359; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xfff8ff40; +- *((int*)& __m256_op0[5]) = 0x0000ff01; +- *((int*)& __m256_op0[4]) = 0x00090040; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xfff8ff40; +- *((int*)& __m256_op0[1]) = 0x0000ff01; +- *((int*)& __m256_op0[0]) = 0x00090040; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001700000017; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001700000017; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001700000017; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000001700000017; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000010; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x9d9d9d9d9d9d9d8d; +- *((unsigned long*)& __m256i_result[2]) = 0x9d9d9d9d9d9d9d9d; +- *((unsigned long*)& __m256i_result[1]) = 0x9d9d9d9d9d9d9d8d; +- *((unsigned long*)& __m256i_result[0]) = 0x9d9d9d9d9d9d9d9d; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0x62); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xf359f359f359f359; +- *((unsigned long*)& __m128i_op1[0]) = 0xf359f359f359f359; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffff359f358; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffff359f358; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x86dd8341b164f12b; +- *((unsigned long*)& __m128i_op0[0]) = 0x9611c3985b3159f5; +- *((unsigned long*)& __m128i_result[1]) = 0x0021b761002c593c; +- *((unsigned long*)& __m128i_result[0]) = 0x002584710016cc56; +- __m128i_out = __lsx_vsrlri_w(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x86dd8341; +- *((int*)& __m128_op1[2]) = 0xb164f12b; +- *((int*)& __m128_op1[1]) = 0x9611c398; +- *((int*)& __m128_op1[0]) = 0x5b3159f5; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_sult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x86dd8341b164f12b; +- *((unsigned long*)& __m128i_op0[0]) = 0x9611c3985b3159f5; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff86dd83ff9611c3; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_w_d(__m128i_op0,__m128i_op1,0x28); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffefff7f00100080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffefff7f00100080; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff01fb0408; +- *((unsigned long*)& __m256i_op1[2]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff01fb0408; +- *((unsigned long*)& __m256i_op1[0]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0021b761002c593c; +- *((unsigned long*)& __m128i_op1[0]) = 0x002584710016cc56; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0200000002000000; +- *((unsigned long*)& __m256i_result[2]) = 0x02000000fdffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0200000002000000; +- *((unsigned long*)& __m256i_result[0]) = 0x02000000fdffffff; +- __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_result[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_result[0]) = 0xf9796558e39953fd; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000d3259a; +- __m128i_out = __lsx_vbsrl_v(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0200000002000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x02000000fdffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0200000002000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x02000000fdffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000004ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x00000004ffffffff; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff86dd83ff9611c3; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000035697d4e; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000013ecaadf2; +- *((unsigned long*)& __m128i_result[1]) = 0xe280e67f00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00007f80; +- __m128i_out = __lsx_vssrarni_b_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff01fb0408; +- *((unsigned long*)& __m256i_op0[2]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff01fb0408; +- *((unsigned long*)& __m256i_op0[0]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0xf2b180c9fc1fefdc; +- __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0xf2b180c9fc1fefdc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000002ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000002ff; +- __m256i_out = __lasx_xvmsknz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfsqrt_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffintl_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ef; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000ef; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000155b200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000b70000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000016e00; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
+- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000035697d4e; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000013ecaadf2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x000000000155b200; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000b70000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000016e00; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x000002ff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x000002ff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x000002ff; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x000002ff; +- __m256_out = __lasx_xvfmax_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00016e00; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) 
= 0x00016e00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xfffffffff359f358; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffffffff359f358; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000029170; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000029170; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001fff000; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4ee376188658d85f; +- *((unsigned long*)& __m128i_op0[0]) = 0x5728dcc85ac760d2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4e1d76187a58285f; +- *((unsigned long*)& __m128i_result[0]) = 0x572824385a39602e; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0021b761002c593c; +- *((unsigned long*)& __m128i_op0[0]) = 0x002584710016cc56; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000001e03; +- *((unsigned long*)& __m128i_result[1]) = 0x0021b761002c593c; +- *((unsigned long*)& __m128i_result[0]) = 0x002584710016ea59; +- __m128i_out = __lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- 
*((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_result[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_result[0]) = 0xf9796558e39953fd; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffdfff80; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffdfff80; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001e03; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000011e04; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffdfff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffdfff80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000016e00; +- *((unsigned long*)& __m256i_result[3]) = 0xffdfff80ffdfff80; +- *((unsigned long*)& __m256i_result[2]) = 0xffdfff80ffdfff80; +- *((unsigned long*)& __m256i_result[1]) = 0xffdfff80ffdfff80; +- *((unsigned long*)& __m256i_result[0]) = 0xffdfff80ffdfff80; +- __m256i_out = __lasx_xvperm_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_result[2]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_result[1]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_result[0]) = 0x2a2a2a2a2a2a2a2a; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0xd5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffdfff80ffdfff80; +- *((unsigned long*)& __m256i_op1[2]) = 0xffdfff80ffdfff80; +- *((unsigned long*)& __m256i_op1[1]) = 0xffdfff80ffdfff80; +- *((unsigned long*)& __m256i_op1[0]) = 0xffdfff80ffdfff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ff00; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffff359f358; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffff359f358; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffff00ff00; +- __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op1[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000d3460001518a; +- *((unsigned long*)& __m128i_op0[0]) = 0x000084300000e55f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000016; +- *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000016; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000029170; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000029170; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000029170; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fff000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000029170; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000203ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000203ff; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000100; +- *((int*)& __m256_op0[5]) = 0x00000002; +- *((int*)& __m256_op0[4]) = 0xff910072; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000100; +- *((int*)& __m256_op0[1]) = 0x00000002; +- *((int*)& __m256_op0[0]) = 0xff910072; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000001fff0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000feff0001ffb8; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000001fff0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000feff0001ffb8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned 
long*)& __m256i_op0[3]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_op0[2]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_op0[1]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_op0[0]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_result[3]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_result[2]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_result[1]) = 0x2a2a2a2a2a2a2a2a; +- *((unsigned long*)& __m256i_result[0]) = 0x2a2a2a2a2a2a2a2a; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,-14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fff0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000feff0001ffb8; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fff0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000feff0001ffb8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000016; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_b_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffd5d5ffffd5d6; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffd5d5ffffd5d6; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff6361; +- *((unsigned long*)& __m256i_op0[2]) = 0x4d0a902890b800dc; +- 
*((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff6361; +- *((unsigned long*)& __m256i_op0[0]) = 0x4d0a902890b800dc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_result[1]) = 0x001a64b345308091; +- *((unsigned long*)& __m128i_result[0]) = 0x001f2f2cab1c732a; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff6361; +- *((unsigned long*)& __m256i_op0[2]) = 0x4d0a902890b800dc; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff6361; +- *((unsigned long*)& __m256i_op0[0]) = 0x4d0a902890b800dc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000203ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000203ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000001ff03fe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffec75c2d209f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000001ff03fe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffec75c2d209f; +- __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff6361; +- *((unsigned long*)& __m256i_op0[2]) = 0x4d0a902890b800dc; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffff6361; +- *((unsigned long*)& __m256i_op0[0]) = 0x4d0a902890b800dc; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff6361; +- *((unsigned long*)& __m256i_op1[2]) = 0x4d0a902890b800dc; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffff6361; +- *((unsigned long*)& __m256i_op1[0]) = 0x4d0a902890b800dc; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 
0xffffb2f600006f48; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000014414104505; +- *((unsigned long*)& __m128i_op0[0]) = 0x1011050040004101; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000014414104505; +- *((unsigned long*)& __m128i_op1[0]) = 0x1011050040004101; +- *((unsigned long*)& __m128i_result[1]) = 0x1010111105050000; +- *((unsigned long*)& __m128i_result[0]) = 0x4040000041410101; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001a64b345308091; +- *((unsigned long*)& __m128i_op0[0]) = 0x001f2f2cab1c732a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000014414104505; +- *((unsigned long*)& __m128i_op1[0]) = 0x1011050040004101; +- *((unsigned long*)& __m128i_result[1]) = 0x001a323b5430048c; +- *((unsigned long*)& __m128i_result[0]) = 0x008f792cab1cb915; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd3259a2984048c23; +- *((unsigned long*)& __m128i_op0[0]) = 0xf9796558e39953fd; +- *((unsigned long*)& __m128i_op1[1]) = 0x1010111105050000; +- *((unsigned long*)& __m128i_op1[0]) = 0x4040000041410101; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000808000020200; +- __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x2d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000203ff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000203ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001ff03ff; +- __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1010111105050000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4040000041410101; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000110011; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0005000500000000; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001a323b5430048c; +- *((unsigned long*)& __m128i_op0[0]) = 0x008f792cab1cb915; +- *((unsigned long*)& __m128i_result[1]) = 0x001a323b00ffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x008f792c00ffffff; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000001fff0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000feff0001ffb8; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000001fff0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000feff0001ffb8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_h_w(__m256i_op0,__m256i_op1,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000203ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff03ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000203ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000fafe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000fafe; +- __m256i_out = __lasx_xvmskgez_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff03fe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffec75c2d209f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff03fe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffec75c2d209f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001ff03fe; +- *((unsigned long*)& 
__m256i_op0[2]) = 0xfffffec75c2d209f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001ff03fe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffec75c2d209f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000001ff000003fe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000001ff000003fe; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x00000000000000ff; +- __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000808000020200; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ff8000020000; +- __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffb3430a; +- *((int*)& __m256_op0[4]) = 0x006ed8b8; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffb3430a; +- *((int*)& __m256_op0[0]) = 0x006ed8b8; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x000001ff; +- *((int*)& __m256_op1[4]) = 0x000003fe; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x000001ff; +- *((int*)& __m256_op1[0]) = 0x000003fe; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& 
__m256_op2[4]) = 0x000000ff; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x000000ff; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xfff3430a; +- *((int*)& __m256_result[4]) = 0x000000ff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xfff3430a; +- *((int*)& __m256_result[0]) = 0x000000ff; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0x00000001; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0xffffb2f6; +- *((int*)& __m256_op0[4]) = 0x00006f48; +- *((int*)& __m256_op0[3]) = 0x00000001; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0xffffb2f6; +- *((int*)& __m256_op0[0]) = 0x00006f48; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x000000ff; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000000; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001a64b345308091; +- *((unsigned long*)& __m128i_op0[0]) = 0x001f2f2cab1c732a; +- *((unsigned long*)& __m128i_op1[1]) = 0x1baf8eabd26bc629; +- *((unsigned long*)& __m128i_op1[0]) = 0x1c2640b9a8e9fb49; +- *((unsigned long*)& __m128i_result[1]) = 0x0002dab8746acf8e; +- *((unsigned long*)& __m128i_result[0]) = 0x00036dd1c5c15856; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256i_result[3]) = 0x4000400140004001; +- *((unsigned long*)& __m256i_result[2]) = 0xfffff2f640006f48; +- *((unsigned long*)& __m256i_result[1]) = 0x4000400140004001; +- *((unsigned long*)& __m256i_result[0]) = 0xfffff2f640006f48; +- __m256i_out = __lasx_xvbitseti_h(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000aa822a8228222; +- *((unsigned long*)& __m128i_op0[0]) = 
0x03aa558ec8546eb6; +- *((unsigned long*)& __m128i_op1[1]) = 0x001a64b345308091; +- *((unsigned long*)& __m128i_op1[0]) = 0x001f2f2cab1c732a; +- *((unsigned long*)& __m128i_result[1]) = 0x0155ffff754affff; +- *((unsigned long*)& __m128i_result[0]) = 0x034cffff03e5ffff; +- __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3fd1000000000000; +- __m256i_out = __lasx_xvldi(-943); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000001e03; +- *((unsigned long*)& __m128i_op1[1]) = 0x001a64b345308091; +- *((unsigned long*)& __m128i_op1[0]) = 0x001f2f2cab1c732a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000780c00000; +- __m128i_out = __lsx_vrotr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op2[2]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op2[0]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000100000001; +- *((unsigned long*)& __m256d_result[2]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256d_result[1]) = 0x8000000100000001; +- *((unsigned long*)& __m256d_result[0]) = 0xffffb2f600006f48; +- __m256d_out = __lasx_xvfmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff000000ff000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff000000ff000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffb2f600006f48; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x000000000000008c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000008c; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1baf8eabd26bc629; +- *((unsigned long*)& __m128i_op0[0]) = 0x1c2640b9a8e9fb49; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002dab8746acf8e; +- *((unsigned long*)& __m128i_op1[0]) = 0x00036dd1c5c15856; +- *((unsigned long*)& __m128i_result[1]) = 0x1bb1686346d595b7; +- *((unsigned long*)& __m128i_result[0]) = 0x1c29ad8a6daa539f; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff00ffffffff; +- __m256i_out = __lasx_xvseq_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000006de1; +- *((unsigned long*)& __m128i_op0[0]) = 0x5f9ccf33cf600000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x41f0000000000000; +- __m256d_out = __lasx_xvffint_d_lu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000000001; +- __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000006de1; +- *((unsigned long*)& __m128i_op0[0]) = 0x5f9ccf33cf600000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000007; +- *((unsigned long*)& __m128i_result[0]) = 0x0007000700070000; +- __m128i_out = __lsx_vsat_hu(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x000aa822a79308f6; +- *((unsigned long*)& __m128d_op1[0]) = 0x03aa558e1d37b5a1; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000008c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000008c; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xffffff00ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000008b; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff010000008b; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fafe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000fafe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000008c; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000008c; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x0000008c; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x0000008c; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000118; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000118; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000008c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000008c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001180000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001180000000; +- __m256i_out = __lasx_xvsllwil_du_wu(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000aa822a79308f6; +- *((unsigned long*)& __m128i_op0[0]) = 0x03aa558e1d37b5a1; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff80fd820000; +- *((unsigned long*)& __m128i_result[1]) = 0x000aa822a79308f6; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000084d12ce; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000008b; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff010000008b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x000aa822; +- *((int*)& __m128_op0[2]) = 0xa79308f6; +- *((int*)& __m128_op0[1]) = 0x03aa355e; +- *((int*)& __m128_op0[0]) = 0x1d37b5a1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000118; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000118; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000024170000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000aa822a79308f6; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000024170000; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000118; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000118; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_d_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000aa822a79308f6; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op1[1]) = 0x000aa822a79308f6; +- *((unsigned long*)& __m128i_op1[0]) = 0x03aa558e1d37b5a1; +- *((unsigned long*)& __m128i_result[1]) = 0x00155044ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x03aa558e2584c86f; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0002de46; +- *((int*)& __m128_op0[2]) = 0x682de060; +- *((int*)& __m128_op0[1]) = 0x09b50da6; +- *((int*)& __m128_op0[0]) = 0xe67b8fc0; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x084d12ce; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x24170000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000024170000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000020300000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000084d12ce; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000044470000; +- __m128i_out = __lsx_vadda_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0x56); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000118; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000118; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0x2e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000044470000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00004dce00004700; +- __m128i_out = __lsx_vsrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0000fafe; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0000fafe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) 
= 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_result[1]) = 
0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0001ffff0001ffff; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vclz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvbitset_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000044470000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff0000ffff; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op2[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000042ab41; +- *((unsigned long*)& __m128i_result[0]) = 0xb1b1b1b1b16f0670; +- __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplve0_q(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0b4c600000000002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x000000000042ab41; +- *((unsigned long*)& __m128i_op0[0]) = 0xb1b1b1b1b16f0670; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000042ab41; +- *((unsigned long*)& __m128i_result[0]) = 0xb1b1b1b1b16f0670; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000042ab41; +- *((unsigned long*)& __m128i_op0[0]) = 0xb1b1b1b1b16f0670; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000044470000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; +- __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0b4c600000000002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[0]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0004280808080808; +- __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0xa4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& 
__m128_op1[2]) = 0x084d12ce; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x24170000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000024170000; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000044470000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cun_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0001ffff0001ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextrins_d(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; +- __m128i_out = __lsx_vmini_b(__m128i_op0,5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_op1[0]) = 0x0004280808080808; +- *((unsigned long*)& __m128i_result[1]) = 0x0010203030201000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000808080800; +- __m128i_out = __lsx_vmulwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000084d12ce; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000024170000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff0000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ff0000ffff; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcvt_h_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000000ff0000ffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cueq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; +- __m128i_out = __lsx_vmax_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x03fbfffc03fc07fc; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x03fbfffc03fc07fc; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000404040; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_w_d(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x03fbfffc03fc07fc; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x03fbfffc03fc07fc; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff80000000; +- __m256i_out = 
__lasx_xvssrani_w_d(__m256i_op0,__m256i_op1,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x000000001fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000100; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x0000ffff; +- *((int*)& __m256_op0[6]) = 0x0000ffff; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0x0000ffff; +- *((int*)& __m256_op0[3]) = 0x0000ffff; +- *((int*)& __m256_op0[2]) = 0x0000ffff; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x03fbfffc03fc07fc; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x03fbfffc03fc07fc; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000ffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000ffff0000ffff; +- __m256i_out = __lasx_xvinsve0_d(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000404040; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff0000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffff0000; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x08080807f7f7f7f8; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x08080805f5f5f5f8; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; +- __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 
0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffff00; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000001ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000001ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x08080807f5f5f5f8; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m128i_result[1]) = 0x04040403fafafafc; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff80; +- __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7efefefe80ffffff; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvexth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x08080807f5f5f5f8; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0202f5f80000ff00; +- __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xffff0000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffe0000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- *((unsigned long*)& __m128i_op1[1]) = 0x04040403fafafafc; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000ff80; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x007efffefffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xff80fffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x007efffefffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xff80fffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsllwil_w_h(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0202f5f80000ff00; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffbfff; +- *((unsigned long*)& __m256i_op0[2]) = 0x3f7f7f7f407fffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x3f7f7f7f407fffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000fdfdfe; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffe0000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fdfdfe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001fffe0001fffe; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001fffe00010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7ffe0001fffe0001; +- *((unsigned long*)& __m256i_result[2]) = 0x7ffe0001fffeffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000fdfdfe; +- __m256i_out = __lasx_xvsub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vextrins_b(__m128i_op0,__m128i_op1,0xc5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0x34); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000007fff7fff; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x36); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_op0[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_op1[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_result[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7efefefe80ffffff; +- __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_w(__m256i_op0,4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x4079808280057efe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x007ffcfcfd020202; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x004000800080007e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000fc00fd0002; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100c00000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_du_q(__m128i_op0,__m128i_op1,0x26); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ffe0001fffe0001; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ffe0001fffeffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000fdfdfe; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrm_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000017f00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f7f03030000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000017f00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007f7f03030000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000017f00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f7f03030000; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x37); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- long_op0 = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000020006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000020006; +- __m256i_out = __lasx_xvreplgr2vr_d(long_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0xff800000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; +- int_op1 = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x000000ffffff0000; +- __m128i_out = __lsx_vinsgr2vr_w(__m128i_op0,int_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00020006; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00020006; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00020006; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00020006; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x37b0003000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x37b0003000000000; +- __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffe045fffffeff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffff7d; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000017f00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00007f7f03030000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_h(__m128i_op0,3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_result[0]) = 0x5252525252525252; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256d_op1[2]) = 0x4079808280057efe; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x007ffcfcfd020202; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0fffffff0fffffff; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x90007fff90008000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0ffffffe90008000; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffff8000; +- *((int*)& __m256_op0[5]) = 0x7efefefe; +- *((int*)& 
__m256_op0[4]) = 0x80ffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x7efefefe; +- *((int*)& __m256_op0[0]) = 0x80ffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_clt_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrani_b_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x07ffffff07ffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x07ffffff08000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x07ffffff08000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x207f207f207f2000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000207f2000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x207f207f207f2000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000207f2000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[3]) = 0xdf80df80df80dfff; +- *((unsigned long*)& __m256i_result[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffdf80dfff; +- *((unsigned long*)& __m256i_result[0]) = 0x8080808080808080; +- __m256i_out = __lasx_xvsub_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xdf80df80df80dfff; +- *((unsigned long*)& __m256i_op1[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffdf80dfff; +- *((unsigned long*)& __m256i_op1[0]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_b(__m256i_op0,11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000290; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000290; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vmsub_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7efefefe80ffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff003fffc0; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000003fffc0; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_slt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x90007fff90008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0ffffffe90008000; +- *((unsigned long*)& __m256i_result[3]) = 0x87ffffff87ffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xc880bfffc880c080; +- *((unsigned long*)& __m256i_result[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_result[0]) = 0x87ffffffc880c080; +- __m256i_out = __lasx_xvavgr_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000290; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000290; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000002; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xdf80df80df80dfff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffdf80dfff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffc00fffffc00; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffc00fffffc00; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvmaxi_w(__m256i_op0,-2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010100000101; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0xff800000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000010100000101; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000010100000101; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[2]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[0]) = 0xff00ff007f007f00; +- __m256i_out = __lasx_xvbitclr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x2e2b34ca59fa4c88; +- *((unsigned long*)& __m128i_op1[0]) = 0x3b2c8aefd44be966; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x2e34594c3b000000; +- __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000101; +- __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x2e2b34ca59fa4c88; +- *((unsigned long*)& __m128i_op1[0]) = 0x3b2c8aefd44be966; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256i_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[2]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_result[0]) = 0xff00ff007f007f00; +- __m256i_out = __lasx_xvmini_d(__m256i_op0,-5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffc00fffffc00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffc00fffffc00; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[2]) = 0xc03fc03fc03fc03f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_result[0]) = 0xc03fc03fc03fc03f; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x3a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[2]) = 0xc03fc03fc03fc03f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000003f; +- *((unsigned long*)& __m256i_op0[0]) = 0xc03fc03fc03fc03f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000002d; +- *((unsigned long*)& __m256i_result[2]) = 0xc02dc02dc02dc02d; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000002d; +- *((unsigned long*)& __m256i_result[0]) = 0xc02dc02dc02dc02d; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0xed); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000007; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x2e34594c3b000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x002e0059003b0000; +- __m128i_out = __lsx_vpackod_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvmskgez_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256d_op0[2]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256d_op0[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256d_op0[0]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256d_op1[3]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256d_op1[2]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256d_op1[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m256d_op1[0]) = 0xff00ff007f007f00; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128d_op0[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x001a001a001a001a; +- *((unsigned long*)& __m128i_result[0]) = 0x001a001a001a001a; +- __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x000000ff; +- *((int*)& __m256_op0[4]) = 0x000000ff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x000000ff; +- *((int*)& __m256_op0[0]) = 0x000000ff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000101; +- *((int*)& __m256_op1[4]) = 0x00000101; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000101; +- *((int*)& __m256_op1[0]) = 0x00000101; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& 
__m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmul_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m128_op0[3]) = 0x7ff80000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x7ff80000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvssub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x2e34594c; +- *((int*)& __m128_op0[0]) = 0x3b000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vftintrpl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x800000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x800000ff000000ff; +- __m256i_out = 
__lasx_xvbitrev_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m128i_result[0]) = 0x0808080808080808; +- __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001a001a001a001a; +- *((unsigned long*)& __m128i_op0[0]) = 0x001a001a001a001a; +- *((unsigned long*)& __m128i_result[1]) = 0x001a001a001a000b; +- *((unsigned long*)& __m128i_result[0]) = 0x001a001a001a000b; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000; +- *((unsigned long*)& __m128i_result[1]) = 0xe9e9e9e9e9e9e9e9; +- *((unsigned long*)& __m128i_result[0]) = 0x171d423524e9e9e9; +- __m128i_out = __lsx_vsubi_bu(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x002e0059003b0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000005c000000b2; +- *((unsigned long*)& __m128i_result[0]) = 0x0000007600000000; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000; +- *((unsigned long*)& __m128i_result[1]) = 0x017001a002c80260; +- *((unsigned long*)& __m128i_result[0]) = 0x01d8000000000000; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x000000ff; +- *((int*)& __m256_op0[4]) = 0x000000ff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x000000ff; +- *((int*)& __m256_op0[0]) = 0x000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010000000100; +- __m256i_out = __lasx_xvfclass_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x017001a002c80260; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x01d8000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x2e34594c3b000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vssrarni_wu_d(__m128i_op0,__m128i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00feff0100feff01; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00feff0100feff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff000000ff; +- __m256i_out = __lasx_xvhsubw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000005c000000b2; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000007600000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffffffff; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x2e34594c3b000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000002e34594c; +- __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x800000ff000000ff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x800000ff000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x90007fff90008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0ffffffe90008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x4800408ef07f7f01; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0800000eeffffe02; +- __m256i_out = __lasx_xvmulwod_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0xff800000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, 
__m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000010000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000010000000; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_result[0]) = 0xff800000ff800000; +- __m128i_out = __lsx_vmin_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00feff0100feff01; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00feff0100feff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000010000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000010000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff8000010f800000; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff8000010f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fff80000; +- __m128i_out = __lsx_vmuh_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001a001a001a000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x001a001a001a000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x001a001a001a000b; +- *((unsigned long*)& __m128i_op1[0]) = 0x001a001a001a000b; +- *((unsigned long*)& __m128i_result[1]) = 0x001a001a001a0008; +- *((unsigned long*)& __m128i_result[0]) = 0x001a001a001a000b; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff8000010f800000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; +- __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000002d; +- *((unsigned long*)& __m256i_op0[2]) = 0xc02dc02dc02dc02d; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000002d; +- *((unsigned long*)& __m256i_op0[0]) = 0xc02dc02dc02dc02d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xfff0000000000000; +- __m256d_out = __lasx_xvflogb_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xff8000010f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000ff8000010f78; +- __m128i_out = __lsx_vaddwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x002a001a001a000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000002a001a; +- *((unsigned long*)& __m128i_result[0]) = 0x001a000b00000000; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x78); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x001a001a; +- *((int*)& __m128_op0[2]) = 0x001a0008; +- *((int*)& __m128_op0[1]) = 0x001a001a; +- *((int*)& __m128_op0[0]) = 0x001a000b; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xff800001; +- *((int*)& __m128_op1[0]) = 0x0f800000; +- *((int*)& __m128_op2[3]) = 0xff800000; +- *((int*)& __m128_op2[2]) = 0xff800000; +- *((int*)& __m128_op2[1]) = 0xff800000; +- *((int*)& __m128_op2[0]) = 0xff800000; +- *((int*)& __m128_result[3]) = 0xffffffff; +- *((int*)& __m128_result[2]) = 0xffffffff; +- *((int*)& __m128_result[1]) = 0xffc00001; +- *((int*)& __m128_result[0]) = 0xff800000; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000002a001a; +- *((unsigned long*)& __m128i_op0[0]) = 0x001a000b00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffe000ffffffffff; +- __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff8000010f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000900000009; +- *((unsigned long*)& __m128i_result[0]) = 0xff80000a0f800009; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128d_op0[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128d_op1[1]) = 0x002a001a001a000b; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x002a001a001a000b; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmina_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xff800001; +- *((int*)& __m128_op0[0]) = 0x0f800000; +- *((int*)& __m128_op1[3]) = 0x00000009; +- *((int*)& __m128_op1[2]) = 0x00000009; +- *((int*)& __m128_op1[1]) = 0xff80000a; +- *((int*)& __m128_op1[0]) = 0x0f800009; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff7fff80; +- *((int*)& __m128_op0[2]) = 0xff800001; +- *((int*)& __m128_op0[1]) = 0xe593d844; +- *((int*)& __m128_op0[0]) = 0xe593c8c4; +- *((int*)& __m128_op1[3]) = 0xff800000; +- *((int*)& __m128_op1[2]) = 0xff800000; +- *((int*)& __m128_op1[1]) = 0xe593c8c4; +- *((int*)& __m128_op1[0]) = 0xe593c8c4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ff8000010f78; +- *((unsigned long*)& __m128i_op1[1]) = 0x002a001a001a000b; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001a0000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe593c8c4e593c8c4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff8000010f78; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff7f0080ff7ef088; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0010001000030000; +- *((unsigned long*)& __m256i_result[2]) = 0x0010001000030000; +- *((unsigned long*)& __m256i_result[1]) = 0x0010001000030000; +- *((unsigned long*)& __m256i_result[0]) = 0x0010001000030000; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0x0000ffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_op1[7]) = 0x00100010; +- *((int*)& __m256_op1[6]) = 0x00030000; +- *((int*)& __m256_op1[5]) = 0x00100010; +- *((int*)& __m256_op1[4]) = 0x00030000; +- *((int*)& __m256_op1[3]) = 0x00100010; +- *((int*)& __m256_op1[2]) = 0x00030000; +- *((int*)& __m256_op1[1]) = 0x00100010; +- *((int*)& __m256_op1[0]) = 0x00030000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_result[0]) = 0xff800000ff800000; +- __m128i_out = __lsx_vreplve_w(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x002a001a001a000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000002a001a; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000001a000b; +- __m128i_out = __lsx_vexth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0010001000030000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0010001000030000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0010001000030000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0010001000030000; +- *((int*)& __m256_result[7]) = 0x49800080; +- *((int*)& __m256_result[6]) = 0x48400000; +- *((int*)& __m256_result[5]) = 0x49800080; +- *((int*)& __m256_result[4]) = 0x48400000; +- *((int*)& __m256_result[3]) = 0x49800080; +- *((int*)& __m256_result[2]) = 0x48400000; +- *((int*)& __m256_result[1]) = 0x49800080; +- *((int*)& __m256_result[0]) = 0x48400000; +- __m256_out = __lasx_xvffint_s_w(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001a0000000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_b(__m128i_op0,15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe593c8c4e593c8c4; +- *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0x8080000080800000; +- *((unsigned long*)& __m128i_result[0]) = 0x9380c4009380c400; +- __m128i_out = __lsx_vpackev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000001a0000000b; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x00000080000000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff000000000000; +- __m256i_out = __lasx_xvslei_h(__m256i_op0,-8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffc00001ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x003ffffe00800000; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x003ffffe00800000; +- *((unsigned long*)& __m128i_result[1]) = 0xff810001ff810002; +- *((unsigned long*)& __m128i_result[0]) = 0x7f804000ff810001; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x003ffffe00800000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000034; +- __m128i_out = __lsx_vmskltz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000002a001a; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000001a000b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128d_op0[0]) = 0xe593c8c4e593c8c4; +- *((unsigned long*)& __m128d_result[1]) = 0x805ffffe01001fe0; +- *((unsigned long*)& __m128d_result[0]) = 0x9a49e11102834d70; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op1[1]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffff801000000010; +- *((unsigned long*)& __m256i_op2[2]) = 0xffff800300000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xffff801000000010; +- *((unsigned long*)& __m256i_op2[0]) = 0xffff800300000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvmaddwev_d_wu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff801000000010; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800300000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff801000000010; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800300000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffff801000000010; +- *((unsigned long*)& __m256i_result[2]) = 0xffff800300000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffff801000000010; +- *((unsigned long*)& __m256i_result[0]) = 0xffff800300000000; +- __m256i_out = __lasx_xvsra_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_d(__m256i_op0,-3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffff801000000010; +- *((unsigned long*)& __m256d_op1[2]) = 0xffff800300000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffff801000000010; +- *((unsigned long*)& __m256d_op1[0]) = 0xffff800300000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffe0000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffe0000000; +- __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x805ffffe01001fe0; +- *((unsigned long*)& __m128i_op0[0]) = 0x9a49e11102834d70; +- *((unsigned long*)& __m128i_op1[1]) = 0x8144ffff01c820a4; +- *((unsigned long*)& __m128i_op1[0]) = 0x9b2ee1a4034b4e34; +- *((unsigned long*)& __m128i_result[1]) = 0xff1affff01001fe0; +- *((unsigned long*)& __m128i_result[0]) = 0xff1aff6d02834d70; +- __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x841f000fc28f801f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007c0000003e0080; +- __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x003ffffe00800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrarni_hu_w(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe593c8c4e593c8c4; +- *((unsigned long*)& __m128i_op1[1]) = 0x8144ffff01c820a4; +- *((unsigned long*)& __m128i_op1[0]) = 0x9b2ee1a4034b4e34; +- *((unsigned long*)& __m128i_result[1]) = 0xffff80c400000148; +- *((unsigned long*)& __m128i_result[0]) = 0xffff80c1ffffe8de; +- __m128i_out = __lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff801000000010; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff800300000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff801000000010; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff800300000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000cc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000cc; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc1bdceee242070dc; +- *((unsigned long*)& __m128i_op0[0]) = 0xe907b754d7eaa478; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_h_w(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff1affff01001fe0; +- *((unsigned long*)& __m128i_op0[0]) = 0xff1aff6d02834d70; +- *((unsigned long*)& __m128i_result[1]) = 0x7f800d007f803680; +- *((unsigned long*)& __m128i_result[0]) = 0x0100418026803800; +- __m128i_out = __lsx_vsllwil_hu_bu(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffef; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffee; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffef; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffee; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff1affff01001fe0; +- *((unsigned long*)& __m128i_op0[0]) = 0xff1aff6d02834d70; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000034; +- *((unsigned long*)& __m128i_result[1]) = 0xfe1bfefe00011ee1; +- *((unsigned long*)& __m128i_result[0]) = 0xfe1bfe6c03824c60; +- __m128i_out = __lsx_vbitrev_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000c040c0; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000c040c0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff80c400000148; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff80c1ffffe8de; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000148; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000034; +- __m128i_out = __lsx_vmax_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x841f000fc28f801f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x107c003c083c007c; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffe00000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffe00000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000007f8; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000002de; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000007f8; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000002de; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000007f7; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffff808; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000007f7; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffff808; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000c040c0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000c040c0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_op2[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_op2[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffe000ffffffff08; +- *((unsigned long*)& __m256i_result[1]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffe000ffffffff08; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff1afffefec0ec85; +- *((unsigned long*)& __m128i_op0[0]) = 0xff1aff6d48ce567f; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff80c400000148; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff80c1ffffe8de; +- *((unsigned long*)& __m128i_result[1]) = 0xffe3ffd8ffe30919; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffffffff; +- __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff80000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x841f000fc28f801f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x841f000fc28f801f; +- *((unsigned long*)& __m128i_op2[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op2[0]) = 0xe593c8c4e593c8c4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x76ecfc8b85ac78db; +- __m128i_out = __lsx_vmaddwev_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x001c001c001c001c; +- *((unsigned long*)& __m256i_result[2]) = 0x001c001c001c001c; +- *((unsigned long*)& __m256i_result[1]) = 0x001c001c001c001c; +- *((unsigned long*)& __m256i_result[0]) = 0x001c001c001d001d; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffe000ffffffff08; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffe000ffffffff08; +- *((unsigned long*)& __m256i_result[3]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0fffffff0fffffff; +- __m256i_out = __lasx_xvsat_wu(__m256i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0fffffff0fffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_result[1]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_result[0]) = 0x0fffffff10000006; +- __m256i_out = __lasx_xvaddi_du(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256d_op1[2]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256d_op1[1]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256d_op1[0]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; +- *((unsigned long*)& __m128i_op1[1]) = 0x01017f3c00000148; +- *((unsigned long*)& __m128i_op1[0]) = 0x117d7f7b093d187f; +- *((unsigned long*)& __m128i_result[1]) = 0x117d7f7b093d187f; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000034; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x70); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x117d7f7b093d187f; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000034; +- *((unsigned long*)& __m128i_op1[1]) = 0xfe1bfefe00011ee1; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe1bfe6c03824c60; +- *((unsigned long*)& __m128i_result[1]) = 0x7f7f7f7f0000001a; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7f017f7f7f7f7f; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffe000ffffffff08; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe000ffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffe000ffffffff08; +- *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op1[2]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op1[1]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op1[0]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_result[3]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_result[2]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_result[1]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_result[0]) = 0x00000001fffffff9; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op0[1]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_result[2]) = 0x10ffffff10000006; +- *((unsigned long*)& __m256i_result[1]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_result[0]) = 0x10ffffff10000006; +- __m256i_out = __lasx_xvfrstpi_b(__m256i_op0,__m256i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000498000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x00004843ffffffe0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000498000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000684000000000; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
+- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000008; +- __m256i_out = __lasx_xvbitseti_d(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000126000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x2555205ea7bc4020; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000126000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x2555205ea7bc4020; +- *((unsigned long*)& __m256i_op1[3]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op1[2]) = 0x10ffffff10000006; +- *((unsigned long*)& __m256i_op1[1]) = 0x0fffffff10000006; +- *((unsigned long*)& __m256i_op1[0]) = 0x10ffffff10000006; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000004980008; +- *((unsigned long*)& __m256i_result[2]) = 0x003ffffffc400000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000004980008; +- *((unsigned long*)& __m256i_result[0]) = 0x003ffffffc400000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x46); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x413e276583869d79; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f7f017f9d8726d3; +- *((unsigned long*)& __m128i_op1[1]) = 0x7c7cd2eb63637c52; +- *((unsigned long*)& __m128i_op1[0]) = 0x82ffd2210127add2; +- *((unsigned long*)& __m128i_result[1]) = 0xffc2007aff230027; +- *((unsigned long*)& __m128i_result[0]) = 0x0080005eff600001; +- __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000498000000080; +- *((unsigned long*)& __m256i_result[2]) = 0x000048430000ffe0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000498000000080; +- *((unsigned long*)& __m256i_result[0]) = 0x0000684000000000; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffc2007aff230027; +- *((unsigned long*)& __m128i_op0[0]) = 0x0080005eff600001; +- *((unsigned long*)& __m128i_op1[1]) = 0x01017f3c00000148; +- *((unsigned long*)& __m128i_op1[0]) = 0x117d7f7b093d187f; +- *((unsigned long*)& __m128i_result[1]) = 0xff23002700000148; +- *((unsigned long*)& __m128i_result[0]) = 0xff600001093d187f; +- __m128i_out = __lsx_vpackev_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000497fe0000080; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000683fe0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000497fe0000080; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000683fe0000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffb6811fffff80; +- *((unsigned long*)& __m256i_result[2]) = 0xffff97c120000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffb6811fffff80; +- *((unsigned long*)& __m256i_result[0]) = 
0xffff97c120000000; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x413e276583869d79; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f7f017f9d8726d3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x413e276583869d79; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f7f017f9d8726d3; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffc2007a; +- *((int*)& __m128_op0[2]) = 0xff230027; +- *((int*)& __m128_op0[1]) = 0x0080005e; +- *((int*)& __m128_op0[0]) = 0xff600001; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sueq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000117d00007f7b; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000093d0000187f; +- *((unsigned long*)& __m128i_op1[1]) = 0x7d7f027f7c7f7c79; +- *((unsigned long*)& __m128i_op1[0]) = 0x7e7f7e7f027f032f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7d7f13fc7c7ffbf4; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op1[1]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[3]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op1[1]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_result[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_result[0]) = 0x4980008068400000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffb6811fffff80; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff97c120000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffb6811fffff80; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff97c120000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000004843ffdff; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00043fff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00043fff00000000; +- __m256i_out = 
__lasx_xvsrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffb6804cb9; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffb7bbdec0; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffb680489b; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffb7bc02a0; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xfffffffd; +- *((int*)& __m256_result[4]) = 0xfffffffd; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xfffffffd; +- *((int*)& __m256_result[0]) = 0xfffffffd; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffb6811fffff80; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff97c120000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffb6811fffff80; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff97c120000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xdb410010cbe10010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xdb410010cbe10010; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000f0000000f; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffff00000000f; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; +- int_op1 = 0x0000000000000040; +- *((unsigned long*)& __m128i_result[1]) = 0x0fbf0fbf0fbf0fbf; +- *((unsigned long*)& __m128i_result[0]) = 0x0fbf0fbf0fbf0fbf; +- __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x3de00103153ff5fb; +- *((unsigned long*)& __m256d_op0[2]) = 0xbffffffe80000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x3de00103153ff5fb; +- *((unsigned long*)& __m256d_op0[0]) = 0xbffffffe80000000; +- *((unsigned long*)& __m256d_result[3]) = 0x40f69fe73c26f4ee; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x40f69fe73c26f4ee; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff8000000000000; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0xffd27db010d20fbf; +- *((unsigned 
long*)& __m128i_result[1]) = 0x9727b8499727b849; +- *((unsigned long*)& __m128i_result[0]) = 0x12755900b653f081; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x40f69fe73c26f4ee; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x40f69fe73c26f4ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_result[3]) = 0x40f69fe63c26f4f5; +- *((unsigned long*)& __m256i_result[2]) = 0x7ff7ffff00000007; +- *((unsigned long*)& __m256i_result[1]) = 0x40f69fe63c26f4f5; +- *((unsigned long*)& __m256i_result[0]) = 0x7ff7ffff00000007; +- __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_result[3]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0fffffffffffffff; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x40f69fe6; +- *((int*)& __m256_op0[6]) = 0x3c26f4f5; +- *((int*)& __m256_op0[5]) = 0x7ff7ffff; +- *((int*)& __m256_op0[4]) = 0x00000007; +- *((int*)& __m256_op0[3]) = 0x40f69fe6; +- *((int*)& __m256_op0[2]) = 0x3c26f4f5; +- *((int*)& __m256_op0[1]) = 0x7ff7ffff; +- *((int*)& __m256_op0[0]) = 0x00000007; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9727b8499727b849; +- *((unsigned long*)& __m128i_op0[0]) = 0x12755900b653f081; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7d7f13fc7c7ffbf4; +- *((unsigned long*)& __m128i_result[1]) = 0xffff9727ffff9727; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffe79ffffba5f; +- __m128i_out = __lsx_vhsubw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffff9727; +- *((int*)& __m128_op0[2]) = 0xffff9727; +- *((int*)& __m128_op0[1]) = 0xfffffe79; +- *((int*)& __m128_op0[0]) = 0xffffba5f; +- *((int*)& __m128_result[3]) = 0xffff9727; +- *((int*)& __m128_result[2]) = 0xffff9727; +- *((int*)& __m128_result[1]) = 0xfffffe79; +- *((int*)& __m128_result[0]) = 0xffffba5f; +- __m128_out = __lsx_vfsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf00040fbf; +- *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf00000fbf; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x9727b8499727b849; +- *((unsigned long*)& __m128i_op2[0]) = 0x12755900b653f081; +- *((unsigned long*)& __m128i_result[1]) = 0x00060fbf00040fbf; +- *((unsigned long*)& __m128i_result[0]) = 0x00020fbf00000fbf; +- __m128i_out = __lsx_vmadd_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x0fffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x0fffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x0fffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x0fffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000555889; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000002580f01; +- *((unsigned long*)& __m128i_result[1]) = 0x0010000000455889; +- *((unsigned long*)& __m128i_result[0]) = 0x0010000002480f01; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff9727ffff9727; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffe79ffffba5f; +- *((unsigned long*)& __m128i_result[1]) = 0xffff972700000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffba5f00000000; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x20); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf00040fbf; +- *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf00000fbf; +- *((unsigned long*)& __m128i_result[1]) = 0x00060fbf02040fbf; +- *((unsigned long*)& __m128i_result[0]) = 0x00020fbf02000fbf; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000002c21ffeff; +- *((unsigned long*)& __m256i_op0[2]) = 0xc0000000c0000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000002c21ffeff; +- *((unsigned long*)& __m256i_op0[0]) = 0xc0000000c0000000; +- *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; +- __m256i_out = __lasx_xvsat_d(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op0[2]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op0[1]) = 0x498000804843ffe0; +- *((unsigned long*)& __m256i_op0[0]) = 0x4980008068400000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x498100814843ffe1; +- *((unsigned long*)& __m256i_result[2]) = 0x4981008168410001; +- *((unsigned long*)& __m256i_result[1]) = 0x498100814843ffe1; +- *((unsigned long*)& __m256i_result[0]) = 0x4981008168410001; +- __m256i_out = __lasx_xvbitset_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x49810081; +- *((int*)& __m256_op1[6]) = 0x4843ffe1; +- *((int*)& __m256_op1[5]) = 0x49810081; +- *((int*)& __m256_op1[4]) = 0x68410001; +- *((int*)& __m256_op1[3]) = 0x49810081; +- *((int*)& __m256_op1[2]) = 0x4843ffe1; +- *((int*)& __m256_op1[1]) = 0x49810081; +- *((int*)& __m256_op1[0]) = 0x68410001; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x80000000; +- *((int*)& __m256_result[6]) = 0x80000000; +- *((int*)& __m256_result[5]) = 0x80000000; +- *((int*)& __m256_result[4]) = 0x80000000; +- *((int*)& __m256_result[3]) = 0x80000000; +- *((int*)& __m256_result[2]) = 0x80000000; +- *((int*)& __m256_result[1]) = 0x80000000; +- *((int*)& __m256_result[0]) = 0x80000000; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000001fffffff9; +- *((unsigned long*)& __m256i_result[3]) = 0x9ffffd8020010001; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff9fffffff9; +- *((unsigned long*)& __m256i_result[1]) = 0x9ffffd8020010001; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff9fffffff9; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x9ffffd8020010001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffff9fffffff9; +- *((unsigned long*)& __m256i_op0[1]) = 0x9ffffd8020010001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffff9fffffff9; +- *((unsigned long*)& __m256i_op1[3]) = 0x40f69fe73c26f4ee; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x40f69fe73c26f4ee; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000018ffff2b13; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000018ffff2b13; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvhsubw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf00040fbf; +- *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf00000fbf; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0xffffac5cffffac5c; +- *((unsigned long*)& __m128i_result[0]) = 0xffffac5cffffac5c; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000555889; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000002580f01; +- *((unsigned long*)& __m128i_op1[1]) = 0x00060fbf02040fbf; +- *((unsigned long*)& __m128i_op1[0]) = 0x00020fbf02000fbf; +- *((unsigned long*)& __m128i_result[1]) = 0x00060fbf02596848; +- *((unsigned long*)& __m128i_result[0]) = 0x00020fbf04581ec0; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff9727ffff9727; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffe79ffffba5f; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x010169d9010169d9; +- *((unsigned long*)& __m128i_result[0]) = 0x01010287010146a1; +- __m128i_out = __lsx_vdiv_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x498100814843ffe1; +- *((unsigned long*)& __m256i_op0[2]) = 0x4981008168410001; +- *((unsigned long*)& __m256i_op0[1]) = 0x498100814843ffe1; +- *((unsigned long*)& __m256i_op0[0]) = 0x4981008168410001; +- *((unsigned long*)& __m256i_op1[3]) = 0x40f69fe73c26f4ee; +- *((unsigned long*)& __m256i_op1[2]) 
= 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x40f69fe73c26f4ee; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff896099cbdbfff1; +- *((unsigned long*)& __m256i_result[2]) = 0xc987ffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xff896099cbdbfff1; +- *((unsigned long*)& __m256i_result[0]) = 0xc987ffffffffffff; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00060fbf02596848; +- *((unsigned long*)& __m128i_op0[0]) = 0x00020fbf04581ec0; +- *((unsigned long*)& __m128i_op1[1]) = 0x010169d9010169d9; +- *((unsigned long*)& __m128i_op1[0]) = 0x01010287010146a1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000200000001; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op1[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffac5cffffac5c; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffac5cffffac5c; +- *((unsigned long*)& __m128i_op1[1]) = 0x010169d9010169d9; +- *((unsigned long*)& __m128i_op1[0]) = 0x01010287010146a1; +- *((unsigned long*)& __m128i_result[1]) = 0xff01ff01ac025c87; +- *((unsigned long*)& __m128i_result[0]) = 0xff01ff01ac465ca1; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff896099cbdbfff1; +- *((unsigned long*)& __m256i_op0[2]) = 0xc987ffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xff896099cbdbfff1; +- *((unsigned long*)& __m256i_op0[0]) = 0xc987ffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00769f673424000f; +- *((unsigned long*)& 
__m256i_result[2]) = 0x3678000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x00769f673424000f; +- *((unsigned long*)& __m256i_result[0]) = 0x3678000100000001; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0xffd27db010d20fbf; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0xffd27db010d20fbf; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0xffa4fb6021a41f7e; +- __m128i_out = __lsx_vaddwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_result[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_result[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_result[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_result[0]) = 0xfffffffffffffff8; +- __m256d_out = __lasx_xvfrint_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x9ffffd8020010001; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff9fffffff9; +- *((unsigned long*)& __m256i_op1[1]) = 0x9ffffd8020010001; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff9fffffff9; +- *((unsigned long*)& __m256i_result[3]) = 0x00009fff00002001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00009fff00002001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00060fbf; +- *((int*)& __m128_op0[2]) = 0x02040fbf; +- *((int*)& __m128_op0[1]) = 0x00020fbf; +- *((int*)& __m128_op0[0]) = 0x02000fbf; +- *((int*)& __m128_op1[3]) = 0x63636363; +- *((int*)& __m128_op1[2]) = 0x63636363; +- *((int*)& __m128_op1[1]) = 0xffd27db0; +- *((int*)& __m128_op1[0]) = 0x10d20fbf; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_result[0]) 
= 0xfffe97c020010001; +- __m256i_out = __lasx_xvmax_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00009fff; +- *((int*)& __m256_op0[6]) = 0x00002001; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0x0000ffff; +- *((int*)& __m256_op0[3]) = 0x00009fff; +- *((int*)& __m256_op0[2]) = 0x00002001; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_op1[7]) = 0xfffeb683; +- *((int*)& __m256_op1[6]) = 0x9ffffd80; +- *((int*)& __m256_op1[5]) = 0xfffe97c0; +- *((int*)& __m256_op1[4]) = 0x20010001; +- *((int*)& __m256_op1[3]) = 0xfffeb683; +- *((int*)& __m256_op1[2]) = 0x9ffffd80; +- *((int*)& __m256_op1[1]) = 0xfffe97c0; +- *((int*)& __m256_op1[0]) = 0x20010001; +- *((int*)& __m256_op2[7]) = 0x00009fff; +- *((int*)& __m256_op2[6]) = 0x00002001; +- *((int*)& __m256_op2[5]) = 0x0000ffff; +- *((int*)& __m256_op2[4]) = 0x0000ffff; +- *((int*)& __m256_op2[3]) = 0x00009fff; +- *((int*)& __m256_op2[2]) = 0x00002001; +- *((int*)& __m256_op2[1]) = 0x0000ffff; +- *((int*)& __m256_op2[0]) = 0x0000ffff; +- *((int*)& __m256_result[7]) = 0xfffeb683; +- *((int*)& __m256_result[6]) = 0x80002001; +- *((int*)& __m256_result[5]) = 0xfffe97c0; +- *((int*)& __m256_result[4]) = 0x8000ffff; +- *((int*)& __m256_result[3]) = 0xfffeb683; +- *((int*)& __m256_result[2]) = 0x80002001; +- *((int*)& __m256_result[1]) = 0xfffe97c0; +- *((int*)& __m256_result[0]) = 0x8000ffff; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb68380002001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c08000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb68380002001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c08000ffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000007fff5b41c0; +- *((unsigned long*)& __m256i_result[2]) = 0x0000007fff5b41d0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000007fff5b41c0; +- *((unsigned long*)& __m256i_result[0]) = 0x0000007fff5b41d0; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x59); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00009fff; +- *((int*)& __m256_op0[6]) = 0x00002001; +- *((int*)& __m256_op0[5]) = 0x0000ffff; +- *((int*)& __m256_op0[4]) = 0x0000ffff; +- *((int*)& 
__m256_op0[3]) = 0x00009fff; +- *((int*)& __m256_op0[2]) = 0x00002001; +- *((int*)& __m256_op0[1]) = 0x0000ffff; +- *((int*)& __m256_op0[0]) = 0x0000ffff; +- *((int*)& __m256_op1[7]) = 0xfffeb683; +- *((int*)& __m256_op1[6]) = 0x9ffffd80; +- *((int*)& __m256_op1[5]) = 0xfffe97c0; +- *((int*)& __m256_op1[4]) = 0x20010001; +- *((int*)& __m256_op1[3]) = 0xfffeb683; +- *((int*)& __m256_op1[2]) = 0x9ffffd80; +- *((int*)& __m256_op1[1]) = 0xfffe97c0; +- *((int*)& __m256_op1[0]) = 0x20010001; +- *((int*)& __m256_result[7]) = 0x00009fff; +- *((int*)& __m256_result[6]) = 0x9ffffd80; +- *((int*)& __m256_result[5]) = 0x0000ffff; +- *((int*)& __m256_result[4]) = 0x20010001; +- *((int*)& __m256_result[3]) = 0x00009fff; +- *((int*)& __m256_result[2]) = 0x9ffffd80; +- *((int*)& __m256_result[1]) = 0x0000ffff; +- *((int*)& __m256_result[0]) = 0x20010001; +- __m256_out = __lasx_xvfmaxa_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff01ff01ac025c87; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01ff01ac465ca1; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff01ff0100000000; +- *((unsigned long*)& __m128i_result[0]) = 0xac465ca100000000; +- __m128i_out = __lsx_vilvl_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00009fff00002001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00009fff00002001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x6363636163636363; +- __m128i_out = __lsx_vsubwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00009fff9ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff20010001; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x00009fff9ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff20010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00002080df5b41cf; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00002080df5b41cf; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000009fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff40a6; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000009fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff40a6; +- __m256i_out = __lasx_xvsubwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xff01ff01ac025c87; +- *((unsigned long*)& __m128i_op1[0]) = 0xff01ff01ac465ca1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636163636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffeb683; +- *((int*)& __m256_op0[6]) = 0x9ffffd80; +- *((int*)& __m256_op0[5]) = 0xfffe97c0; +- *((int*)& __m256_op0[4]) = 0x20010001; +- *((int*)& __m256_op0[3]) = 0xfffeb683; +- *((int*)& __m256_op0[2]) = 0x9ffffd80; +- *((int*)& __m256_op0[1]) = 0xfffe97c0; +- *((int*)& __m256_op0[0]) = 0x20010001; +- *((int*)& __m256_op1[7]) = 0x00009fff; +- *((int*)& __m256_op1[6]) = 0x9ffffd80; +- *((int*)& __m256_op1[5]) = 0x0000ffff; +- *((int*)& __m256_op1[4]) = 0x20010001; +- *((int*)& __m256_op1[3]) = 0x00009fff; +- *((int*)& __m256_op1[2]) = 0x9ffffd80; +- *((int*)& __m256_op1[1]) = 0x0000ffff; +- *((int*)& __m256_op1[0]) = 0x20010001; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00002080; +- *((int*)& __m256_op2[4]) = 0xdf5b41cf; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00002080; +- *((int*)& __m256_op2[0]) = 0xdf5b41cf; +- *((int*)& __m256_result[7]) = 0xfffeb683; +- *((int*)& __m256_result[6]) = 0x007ffd80; +- *((int*)& __m256_result[5]) = 0xfffe97c0; +- *((int*)& __m256_result[4]) = 0xdf5b41cf; +- *((int*)& __m256_result[3]) = 0xfffeb683; +- *((int*)& __m256_result[2]) = 0x007ffd80; +- *((int*)& __m256_result[1]) = 0xfffe97c0; +- *((int*)& __m256_result[0]) = 0xdf5b41cf; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0xfffeb683; +- *((int*)& __m256_op0[6]) = 0x9ffffd80; +- *((int*)& __m256_op0[5]) = 0xfffe97c0; +- *((int*)& __m256_op0[4]) = 0x20010001; +- *((int*)& __m256_op0[3]) = 0xfffeb683; +- *((int*)& __m256_op0[2]) = 0x9ffffd80; +- *((int*)& __m256_op0[1]) = 0xfffe97c0; +- *((int*)& __m256_op0[0]) = 0x20010001; +- *((unsigned 
long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvftintrpl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000019ffdf403; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000011ffd97c3; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000019ffdf403; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000011ffd97c3; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffeb8649d0d6250; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffeb8649d0d6250; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op2[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op2[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op2[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op2[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x7f800000; +- *((int*)& __m256_result[6]) = 0x7f800000; +- *((int*)& __m256_result[5]) = 0x7f800000; +- *((int*)& __m256_result[4]) = 0x7f800000; +- *((int*)& __m256_result[3]) = 0x7f800000; +- *((int*)& __m256_result[2]) = 0x7f800000; +- 
*((int*)& __m256_result[1]) = 0x7f800000; +- *((int*)& __m256_result[0]) = 0x7f800000; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000200000001; +- __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0xff01ff01ac025c87; +- *((unsigned long*)& __m128i_op1[0]) = 0xff01ff01ac465ca1; +- *((unsigned long*)& __m128i_result[1]) = 0x64616462b76106dc; +- *((unsigned long*)& __m128i_result[0]) = 0x64616462b71d06c2; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000019ffdf403; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000011ffd97c3; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000019ffdf403; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000011ffd97c3; +- *((unsigned long*)& __m256i_result[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_result[2]) = 0x000000019ffdf403; +- *((unsigned long*)& __m256i_result[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_result[0]) = 0x000000019ffdf403; +- __m256i_out = __lasx_xvilvh_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x64616462b76106dc; +- *((unsigned long*)& __m128i_op1[0]) = 0x64616462b71d06c2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& 
__m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000001; +- *((int*)& __m256_op1[6]) = 0x9ffdf403; +- *((int*)& __m256_op1[5]) = 0x00000001; +- *((int*)& __m256_op1[4]) = 0x1ffd97c3; +- *((int*)& __m256_op1[3]) = 0x00000001; +- *((int*)& __m256_op1[2]) = 0x9ffdf403; +- *((int*)& __m256_op1[1]) = 0x00000001; +- *((int*)& __m256_op1[0]) = 0x1ffd97c3; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000200a000020020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000200a000020020; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256d_op1[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 
0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00020000ffff0001; +- __m128i_out = __lsx_vssrarn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000001; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00010001; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00060fbf; +- *((int*)& __m128_op1[2]) = 0x02040fbf; +- *((int*)& __m128_op1[1]) = 0x00020fbf; +- *((int*)& __m128_op1[0]) = 0x02000fbf; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000200000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000400000001; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200000001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_h(__m128i_op0,7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00020000; +- *((int*)& __m128_op0[0]) = 0xffff0001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256d_op0[2]) = 
0xfffe97c020010001; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256d_op1[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256d_op1[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_l_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb683007ffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c0df5b41cf; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb683007ffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c0df5b41cf; +- *((unsigned long*)& __m256i_result[3]) = 0xfffeb664007ffd61; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe97a1df5b41b0; +- *((unsigned long*)& __m256i_result[1]) = 0xfffeb664007ffd61; +- *((unsigned long*)& __m256i_result[0]) = 0xfffe97a1df5b41b0; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00020000; +- *((int*)& __m128_op0[0]) = 0xffff0001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffeb664007ffd61; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe97a1df5b41b0; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffeb664007ffd61; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe97a1df5b41b0; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff007ffd61; +- *((unsigned long*)& __m256i_result[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff007ffd61; +- *((unsigned long*)& __m256i_result[0]) = 0xfffe97c020010001; +- __m256i_out = __lasx_xvextrins_w(__m256i_op0,__m256i_op1,0x62); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000fffe00009fff; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000fffe00002001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000fffe00009fff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fffe00002001; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_hu_w(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0002000400000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0003000500000001; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x63636363; +- *((int*)& __m128_op0[2]) = 0x63636363; +- *((int*)& __m128_op0[1]) = 0x63636363; +- *((int*)& __m128_op0[0]) = 0x63636363; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vftint_wu_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffeb664; +- *((int*)& __m256_op0[6]) = 0x007ffd61; +- *((int*)& __m256_op0[5]) = 0xfffe97a1; +- *((int*)& __m256_op0[4]) = 0xdf5b41b0; +- *((int*)& __m256_op0[3]) = 0xfffeb664; +- *((int*)& __m256_op0[2]) = 0x007ffd61; +- *((int*)& __m256_op0[1]) = 0xfffe97a1; +- *((int*)& __m256_op0[0]) = 0xdf5b41b0; +- *((int*)& __m256_op1[7]) = 0xfffeb683; +- *((int*)& __m256_op1[6]) = 0x9ffffd80; +- *((int*)& __m256_op1[5]) = 0xfffe97c0; +- *((int*)& __m256_op1[4]) = 0x20010001; +- *((int*)& __m256_op1[3]) = 0xfffeb683; +- *((int*)& __m256_op1[2]) = 0x9ffffd80; +- *((int*)& __m256_op1[1]) = 0xfffe97c0; +- *((int*)& __m256_op1[0]) = 0x20010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_sueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000001faf19b60; +- *((unsigned long*)& __m256i_op1[2]) = 0x6c2905ae7c14c561; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000001faf19b60; +- *((unsigned long*)& __m256i_op1[0]) = 0x6c2905ae7c14c561; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x94d7fb5200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x94d7fb5200000000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00020004; +- *((int*)& __m128_op0[0]) = 0x00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000e3ab0001352b; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000e3ab0001352b; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- 
*((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff00007fff0000; +- __m256i_out = __lasx_xvssrlni_h_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff00007fff0000; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; +- __m256i_out = __lasx_xvmsub_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x94d7fb5200000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x94d7fb5200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_op2[2]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000038ea4d4a; +- *((unsigned long*)& __m256i_op2[0]) = 0x7fff00007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x94d7fb5200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x94d7fb5200000000; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001388928513889; +- *((unsigned long*)& __m128i_op0[0]) = 0x006938094a013889; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001388928513889; +- *((unsigned long*)& __m128i_op1[0]) = 0x006938094a013889; +- *((unsigned long*)& __m128i_result[1]) = 0x0002711250a27112; +- *((unsigned long*)& __m128i_result[0]) = 0x00d2701294027112; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffeb683007ffd80; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffe97c0df5b41cf; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffeb683007ffd80; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffe97c0df5b41cf; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0001497c98ea4fca; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001497c98ea4fca; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0001497c98ea4fca; +- *((unsigned long*)& __m256i_op2[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0x0001497c98ea4fca; +- *((unsigned long*)& __m256i_op2[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000006715b036; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x000000006715b036; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmadd_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0xfffeb664; +- *((int*)& __m256_op1[6]) = 0x007ffd61; +- *((int*)& __m256_op1[5]) = 0xfffe97a1; +- *((int*)& __m256_op1[4]) = 0xdf5b41b0; +- *((int*)& __m256_op1[3]) = 0xfffeb664; +- *((int*)& __m256_op1[2]) = 0x007ffd61; +- *((int*)& __m256_op1[1]) = 0xfffe97a1; +- *((int*)& __m256_op1[0]) = 0xdf5b41b0; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x94d7fb52; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xfffeb664; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xfffe97a1; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xfffeb664; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xfffe97a1; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000003fffffffd; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000003fffffffd; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000003fffffffe; +- *((unsigned long*)& __m128i_result[0]) = 
0x00000003fffffffd; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002711250a27112; +- *((unsigned long*)& __m128i_op1[0]) = 0x00d2701294027112; +- *((unsigned long*)& __m128i_result[1]) = 0xffff7112ffff7112; +- *((unsigned long*)& __m128i_result[0]) = 0xffff7012ffff7112; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002711250a27112; +- *((unsigned long*)& __m128i_op0[0]) = 0x00d2701294027112; +- *((unsigned long*)& __m128i_result[1]) = 0x080a791a58aa791a; +- *((unsigned long*)& __m128i_result[0]) = 0x08da781a9c0a791a; +- __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x94d7fb5200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffeb6839ffffd80; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe97c020010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00020000; +- *((int*)& __m128_op0[0]) = 0xffff0001; +- *((int*)& __m128_op1[3]) = 0x63636363; +- *((int*)& __m128_op1[2]) = 0x63636363; +- *((int*)& __m128_op1[1]) = 0x63636363; +- *((int*)& __m128_op1[0]) = 0x63636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvssrlrni_w_d(__m256i_op0,__m256i_op1,0x3c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00020000ffff0001; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00020000ffff0001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000003030000; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00020000ffff0001; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100000001; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffeb664007ffd61; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffe97a1df5b41b0; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffeb664007ffd61; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffe97a1df5b41b0; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffeb664007ffd61; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffe97a1df5b41b0; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffeb664007ffd61; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffe97a1df5b41b0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0xc1f03e1042208410; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x00f0001000000010; +- __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xf000f000f000f000; +- *((unsigned long*)& __m256i_op1[2]) = 0xf000f010f000f010; +- *((unsigned long*)& __m256i_op1[1]) = 0xf000f000f000f000; +- *((unsigned long*)& __m256i_op1[0]) = 0xf000f010f000f010; +- *((unsigned long*)& __m256i_result[3]) = 0x00f0000000f00010; +- *((unsigned long*)& __m256i_result[2]) = 0xfff0ff00fff0ff10; +- *((unsigned long*)& __m256i_result[1]) = 0x00f0000000f00010; +- *((unsigned long*)& __m256i_result[0]) = 0xfff0ff00fff0ff10; +- __m256i_out = __lasx_xvilvl_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_bu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00f0001000000010; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 
0x00f0001000000010; +- __m128i_out = __lsx_vsrai_h(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002711350a27112; +- *((unsigned long*)& __m128i_op1[0]) = 0x00d5701794027113; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff61010380; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff61010380; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000006; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000006; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00ff00ef00ff010f; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff010f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_op1[0]) = 0xc1f03e1042208410; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001000110; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000431f851f; +- __m128i_out = __lsx_vaddwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffeffff97a1; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffdf5b000041b0; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffeffff97a1; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffdf5b000041b0; +- *((unsigned long*)& __m256i_result[3]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_result[2]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_result[1]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_result[0]) = 0x000020a4ffffbe4f; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00027113; +- *((int*)& __m128_op0[2]) = 0x50a27112; +- *((int*)& __m128_op0[1]) = 0x00d57017; +- *((int*)& __m128_op0[0]) = 0x94027113; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002711350a27112; +- *((unsigned long*)& __m128i_op0[0]) = 0x00d5701794027113; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_du_q(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfff0ff000000000f; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x000f00f000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfff0ff000000000f; +- *((unsigned long*)& __m256i_op0[0]) = 0x000f00f000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00f8000000000008; +- *((unsigned long*)& __m256i_result[2]) = 0x000800f800000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00f8000000000008; +- *((unsigned long*)& __m256i_result[0]) = 0x000800f800000000; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000110; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000431f851f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001011010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000043431f1f; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xf0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xf2f444429d96dbe1; +- *((unsigned long*)& __m128d_op0[0]) = 0xddd76c75f2f44442; +- *((unsigned long*)& __m128d_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128d_op1[0]) = 0xc1f03e1042208410; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_op0[2]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_op0[1]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_op0[0]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_result[3]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_result[2]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_result[1]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_result[0]) = 0x07fed3c8f7ad28d0; +- __m256i_out = __lasx_xvmaxi_wu(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x400000003fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x400000003fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x4000000040000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x400000003fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x4000000040000000; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x400000003fffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0x4000000040000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffff97a1; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffdf5b000041b0; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffff97a1; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffdf5b000041b0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_op2[2]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_op2[1]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_op2[0]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffeffff97a1; +- *((unsigned long*)& __m256i_result[2]) = 0xffffdf5b000041b0; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffeffff97a1; +- *((unsigned long*)& __m256i_result[0]) = 0xffffdf5b000041b0; +- __m256i_out = __lasx_xvmaddwev_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00f0000000f00010; +- *((unsigned long*)& __m256i_op1[2]) = 0xfff0ff00fff0ff10; +- *((unsigned long*)& __m256i_op1[1]) = 0x00f0000000f00010; +- *((unsigned long*)& __m256i_op1[0]) = 0xfff0ff00fff0ff10; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_w(__m256i_op0,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffeffff97a1; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffdf5b000041b0; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffeffff97a1; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffdf5b000041b0; +- *((unsigned long*)& __m256i_op1[3]) = 0x00f8000000000008; +- *((unsigned long*)& __m256i_op1[2]) = 0x000800f800000000; +- *((unsigned long*)& __m256i_op1[1]) = 
0x00f8000000000008; +- *((unsigned long*)& __m256i_op1[0]) = 0x000800f800000000; +- *((unsigned long*)& __m256i_result[3]) = 0xe3f7fff7fffcbd08; +- *((unsigned long*)& __m256i_result[2]) = 0x0dbfa28000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xe3f7fff7fffcbd08; +- *((unsigned long*)& __m256i_result[0]) = 0x0dbfa28000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00f0000000f00010; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0ff00fff0ff10; +- *((unsigned long*)& __m256i_op0[1]) = 0x00f0000000f00010; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0ff00fff0ff10; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0087ff87f807ff87; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0087ff87f807ff87; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x004001be00dc008e; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffff0100010001; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x004001be00dc008e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_op1[2]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_op1[0]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_result[3]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_result[2]) = 0x000000a400ff004f; +- *((unsigned long*)& 
__m256i_result[1]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_result[0]) = 0x000000a400ff004f; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0087ff87f807ff87; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0087ff87f807ff87; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmina_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000000000; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00ff00ef; +- *((int*)& __m128_op0[2]) = 0x00ff010f; +- *((int*)& __m128_op0[1]) = 0x00ff00ff; +- *((int*)& __m128_op0[0]) = 0x00ff010f; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfrint_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e10; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000400080003fff; +- __m128i_out = __lsx_vexth_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_op0[2]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_op0[1]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_op0[0]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_result[3]) = 0x01c03f8034c03200; +- *((unsigned long*)& 
__m256i_result[2]) = 0x3dc02b400a003400; +- *((unsigned long*)& __m256i_result[1]) = 0x01c03f8034c03200; +- *((unsigned long*)& __m256i_result[0]) = 0x3dc02b400a003400; +- __m256i_out = __lasx_xvsllwil_hu_bu(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_d(__m256i_op0,0x23); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x01c03f8034c03200; +- *((unsigned long*)& __m256i_op0[2]) = 0x3dc02b400a003400; +- *((unsigned long*)& __m256i_op0[1]) = 0x01c03f8034c03200; +- *((unsigned long*)& __m256i_op0[0]) = 0x3dc02b400a003400; +- *((unsigned long*)& __m256i_op1[3]) = 0x01c03f8034c03200; +- *((unsigned long*)& __m256i_op1[2]) = 0x3dc02b400a003400; +- *((unsigned long*)& __m256i_op1[1]) = 0x01c03f8034c03200; +- *((unsigned long*)& __m256i_op1[0]) = 0x3dc02b400a003400; +- *((unsigned long*)& __m256i_op2[3]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_op2[2]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_op2[1]) = 0x07fee332883f86b0; +- *((unsigned long*)& __m256i_op2[0]) = 0x07fed3c8f7ad28d0; +- *((unsigned long*)& __m256i_result[3]) = 0x01ce3c0050d32d40; +- *((unsigned long*)& __m256i_result[2]) = 0x3fadafc013acf600; +- *((unsigned long*)& __m256i_result[1]) = 0x01ce3c0050d32d40; +- *((unsigned long*)& __m256i_result[0]) = 0x3fadafc013acf600; +- __m256i_out = __lasx_xvmaddwod_w_hu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e10; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000400080003fff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000bc2000007e04; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e04; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000400080003fff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000bc2000007e04; +- *((unsigned long*)& __m128i_result[1]) = 0xffffbfff7fffc000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff43dfffff81fb; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_result[3]) = 0x97a297a297a297a2; +- *((unsigned long*)& __m256i_result[2]) = 0x97a297a297a297a2; +- *((unsigned long*)& __m256i_result[1]) = 0x97a297a297a297a2; +- *((unsigned long*)& __m256i_result[0]) = 0x97a297a297a297a2; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000234545b; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0dec4d1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000002345454; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c0dec4ca; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffbfff7fffc000; +- *((unsigned long*)& __m128d_op0[0]) = 0xffff43dfffff81fb; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000a400ff004f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000a400ff004f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000010000005e; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03; +- *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000002345454; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4ca; +- *((unsigned long*)& __m128i_result[1]) = 0x000030ebffffffdc; +- *((unsigned long*)& __m128i_result[0]) = 0x00000203ffffff25; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvbitclri_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000002345454; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0dec4ca; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000060006; +- __m128i_out = __lsx_vsrli_h(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_bu_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000a400ff004f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000a400ff004f; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000a400ff004f; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000a400ff004f; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 
0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfcvtl_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x000000010000685e; +- *((unsigned long*)& __m256d_op0[2]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256d_op0[1]) = 0x000000010000685e; +- *((unsigned long*)& __m256d_op0[0]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256d_op1[3]) = 0x0087ff87f807ff87; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0087ff87f807ff87; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xb3b3b3b3b3b3b3b3; +- *((unsigned long*)& __m256i_result[2]) = 0xb3b3b3b3b3b3b3b3; +- *((unsigned long*)& __m256i_result[1]) = 0xb3b3b3b3b3b3b3b3; +- *((unsigned long*)& __m256i_result[0]) = 0xb3b3b3b3b3b3b3b3; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0x4c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4d1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff3f213b2f; +- __m128i_out = __lsx_vssub_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_op2[2]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_op2[0]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000203000010d0; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffc00300000220; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x27); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03; +- *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220; +- *((unsigned long*)& __m128i_result[1]) = 0x30eb022002101b20; +- *((unsigned long*)& __m128i_result[0]) = 0x020310edc003023d; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; +- *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_op1[1]) = 0x30eb020302101b03; +- *((unsigned long*)& __m128i_op1[0]) = 0x020310d0c0030220; +- *((unsigned long*)& __m128i_result[1]) = 0x30eb022002101b20; +- *((unsigned long*)& __m128i_result[0]) = 0x020310edc003023d; +- __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000c0dec4d1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000040223c2e; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x000000010000685e; +- *((unsigned long*)& __m256d_op1[2]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256d_op1[1]) = 0x000000010000685e; +- *((unsigned long*)& __m256d_op1[0]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; 
+- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03; +- *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000c0dec4d1; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vseq_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_op1[2]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_op1[0]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_result[3]) = 0x00000003ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0001ffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000003ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0001ffffffffffff; +- __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000003ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001ffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000003ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001ffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000010000005e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_w_d(__m256i_op0,__m256i_op1,0x3c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[1]) = 0x000b000b000b000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000b000b000b000b; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x30eb020302101b03; +- *((unsigned long*)& __m128i_op0[0]) = 0x020310d0c0030220; +- *((unsigned long*)& __m128i_op1[1]) = 0x30eb020302101b03; +- *((unsigned long*)& __m128i_op1[0]) = 0x020310d0c0030220; +- *((unsigned long*)& __m128i_result[1]) = 0x020310d0c0030220; +- *((unsigned long*)& __m128i_result[0]) = 0x020310d0c0030220; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000b000b000b000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x000b000b000b000b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000b000b000b000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000b000b000b000b; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x30eb022002101b20; +- *((unsigned long*)& __m128i_op1[0]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 
0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0xffff97a2; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0xffff97a2; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvfcmp_seq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001010000; +- __m256i_out = __lasx_xvdiv_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; +- *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffc3; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_op0[2]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000010000685e; +- *((unsigned long*)& __m256i_op0[0]) = 0x000020a4ffffbe4f; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000008; +- *((unsigned long*)& __m256i_result[2]) = 0x000000040000001b; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000008; +- *((unsigned long*)& __m256i_result[0]) = 0x000000040000001b; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x01010000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 
0x00000000; +- *((int*)& __m256_op1[0]) = 0x01010000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x101b0330eb022002; +- *((unsigned long*)& __m128i_op0[0]) = 0x030220020310edc0; +- *((unsigned long*)& __m128i_result[1]) = 0x0080800080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080008000; +- __m128i_out = __lsx_vslli_b(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; +- *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmaxa_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff97a2; +- __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xff00000000000000; +- 
*((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; +- __m256i_out = __lasx_xvmod_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000040000001b; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000040000001b; +- *((unsigned long*)& __m256i_op1[3]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x021b7d24; +- *((int*)& __m128_op0[2]) = 0x49678a35; +- *((int*)& __m128_op0[1]) = 0x030298a6; +- *((int*)& __m128_op0[0]) = 0x21030a49; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000002; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0020004000400040; +- *((unsigned long*)& __m256i_result[2]) = 0x0020004000400040; +- *((unsigned long*)& __m256i_result[1]) = 0x0020004000400040; +- *((unsigned long*)& __m256i_result[0]) = 0x0020004000400040; +- __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x328e1080889415a0; +- *((unsigned long*)& __m128i_op0[0]) = 0x3960b1a401811060; +- *((unsigned long*)& __m128i_op1[1]) = 0x328e1080889415a0; +- *((unsigned long*)& __m128i_op1[0]) = 0x3960b1a401811060; +- *((unsigned long*)& __m128i_op2[1]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x32f3c7a38f9f4b8b; +- *((unsigned long*)& __m128i_result[0]) = 0x2c9e5069f5d57780; +- __m128i_out = __lsx_vmaddwod_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssub_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_b_h(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000027; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000027; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000080c43b700; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x56); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000027; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000027; +- *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefe7f; +- *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefe7f; 
+- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff97a2; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_d_q(__m256i_op0,__m256i_op1,0x3f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x30eb022002101b20; +- *((unsigned long*)& __m128i_op0[0]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_op1[1]) = 0x30eb022002101b20; +- *((unsigned long*)& __m128i_op1[0]) = 0x020310edc003023d; +- *((unsigned long*)& __m128i_result[1]) = 0x022002101b200203; +- *((unsigned long*)& __m128i_result[0]) = 0x022002101b200203; +- __m128i_out = __lsx_vsrlni_d_q(__m128i_op0,__m128i_op1,0x30); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000027; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000027; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; +- __m256i_out = __lasx_xvslti_hu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x022002101b200203; +- *((unsigned long*)& __m128i_op0[0]) = 0x022002101b200203; +- *((unsigned long*)& __m128i_op1[1]) = 0x022002101b200203; +- *((unsigned long*)& __m128i_op1[0]) = 0x022002101b200203; +- *((unsigned long*)& __m128i_op2[1]) = 0x000000080c43b700; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x036caeeca7592703; +- *((unsigned long*)& __m128i_result[0]) = 0x022002101b200203; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x021b7d2449678a35; +- *((unsigned long*)& __m128i_op0[0]) = 0x030298a621030a49; +- int_result = 0xffffffffffff8a35; +- int_out = __lsx_vpickve2gr_h(__m128i_op0,0x4); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00000000abba7980; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000ccf98000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); 
+- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000001010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000001010000; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00c0c000c0000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc0000000c000c000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000027; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000027; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000027; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned 
long*)& __m256i_op0[0]) = 0x0000000000000027; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x6); +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff; +- __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00c0c000c0000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xc0000000c000c000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00c0c000c0000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xc0000000c000c000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x001e001e001e001e; +- *((unsigned long*)& __m128i_result[0]) = 0x001e001e001e001e; +- __m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvaddwod_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010001; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvshuf_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvslti_h(__m256i_op0,-4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00010001; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00010001; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00010001; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; +- *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x021b7d24c9678a35; +- *((unsigned long*)& __m128i_result[0]) = 0x030298a6a1030a49; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001e001e001e001e; +- *((unsigned long*)& __m128i_op0[0]) = 0x001e001e001e001e; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fff7fff7fff7fff; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x021b7d24c9678a35; +- *((unsigned long*)& __m128i_op1[0]) = 0x030298a6a1030a49; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff4; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x021b7d2449678a35; +- *((unsigned long*)& __m128i_op0[0]) = 0x030298a621030a49; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op2[1]) = 0x021b7d24c9678a35; +- *((unsigned long*)& __m128i_op2[0]) = 0x030298a6a1030a49; +- *((unsigned long*)& __m128i_result[1]) = 0x021b7d24c9678a35; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; 
+- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextl_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; +- *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; +- *((unsigned long*)& __m128i_op1[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op1[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_result[1]) = 0xada4808924882588; +- *((unsigned long*)& __m128i_result[0]) = 0xacad25090caca5a4; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xada4808924882588; +- *((unsigned long*)& __m128i_op0[0]) = 0xacad25090caca5a4; +- *((unsigned long*)& __m128i_op1[1]) = 0x021b7d24c9678a35; +- *((unsigned long*)& __m128i_op1[0]) = 0x030298a6a1030a49; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; +- *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; +- *((unsigned long*)& __m128i_result[1]) = 0x00197f26cb658837; +- *((unsigned long*)& __m128i_result[0]) = 0x01009aa4a301084b; +- __m128i_out = __lsx_vbitrevi_b(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001e001e001e001e; +- *((unsigned long*)& __m128i_op0[0]) = 0x001e001e001e001e; +- *((unsigned long*)& __m128i_op1[1]) = 0xffaeffaeffaeffae; +- *((unsigned long*)& __m128i_op1[0]) = 0xffaeffaeffaeffae; +- *((unsigned long*)& __m128i_result[1]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_result[0]) = 0x001effae001effae; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 
0x00000000; +- *((int*)& __m128_result[1]) = 0x59f7fd70; +- *((int*)& __m128_result[0]) = 0x59f7fd70; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmax_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00197f26cb658837; +- *((unsigned long*)& __m128i_op0[0]) = 0x01009aa4a301084b; +- *((unsigned long*)& __m128i_op1[1]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_result[1]) = 0x0037ffd40083ffe5; +- *((unsigned long*)& __m128i_result[0]) = 0x001e0052001ffff9; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffaeffaeffaeffae; +- *((unsigned long*)& __m128i_op1[0]) = 0xffaeffaeffaeffae; +- *((unsigned long*)& __m128i_result[1]) = 0x0051005200510052; +- *((unsigned long*)& __m128i_result[0]) = 0x0051005200510052; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvpickve_w(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0051005200510052; +- *((unsigned long*)& __m128i_op1[0]) = 0x0051005200510052; +- *((unsigned long*)& __m128i_op2[1]) = 0xffaeffaeffaeffae; +- *((unsigned long*)& __m128i_op2[0]) = 0xffaeffaeffaeffae; +- *((unsigned long*)& __m128i_result[1]) = 0xffffe65ecc1be5bc; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe65ecc1be5bc; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x001effae001effae; +- *((unsigned long*)& __m128d_op0[0]) = 0x001effae001effae; +- *((unsigned long*)& __m128d_result[1]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128d_result[0]) = 0x2006454690d3de87; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x59f7fd7059f7fd70; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001700000017; +- *((unsigned long*)& __m128i_result[0]) = 0x59f7fd8759f7fd87; +- __m128i_out = __lsx_vaddi_wu(__m128i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001700000017; +- *((unsigned long*)& __m128i_op0[0]) = 0x59f7fd8759f7fd87; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001700000017; +- *((unsigned long*)& __m128i_op1[0]) = 0x59f7fd8759f7fd87; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007fff7fff; +- __m128i_out = __lsx_vssrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_op0[0]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_op1[1]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_op1[0]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0037ffd40083ffe5; +- *((unsigned long*)& __m128i_op0[0]) = 0x001e0052001ffff9; +- *((unsigned long*)& __m128i_op1[1]) = 0x001effae001effae; +- *((unsigned 
long*)& __m128i_op1[0]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00df020f0078007f; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00df020f; +- *((int*)& __m128_op0[0]) = 0x0078007f; +- *((int*)& __m128_op1[3]) = 0x0037ffd4; +- *((int*)& __m128_op1[2]) = 0x0083ffe5; +- *((int*)& __m128_op1[1]) = 0x001e0052; +- *((int*)& __m128_op1[0]) = 0x001ffff9; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op1[1]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_op1[0]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_result[1]) = 0x2006454652525252; +- *((unsigned long*)& __m128i_result[0]) = 0x2006454652525252; +- __m128i_out = __lsx_vmin_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffae001effae; +- *((unsigned long*)& __m128i_op0[0]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op1[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op1[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_result[1]) = 0xffaeffadffaeffad; +- *((unsigned long*)& __m128i_result[0]) = 0xffaeffadffaeffad; +- __m128i_out = __lsx_vhsubw_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001700000017; +- *((unsigned long*)& __m128i_op0[0]) = 0x59f7fd8759f7fd87; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001700000017; +- *((unsigned long*)& __m128i_op1[0]) = 0x59f7fd8759f7fd87; +- *((unsigned long*)& __m128i_result[1]) = 0x0000021100000211; +- *((unsigned long*)& __m128i_result[0]) = 0xfb141d31fb141d31; +- __m128i_out = __lsx_vmulwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001700000017; +- *((unsigned long*)& __m128i_op0[0]) = 0x59f7fd8759f7fd87; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffae001effae; +- *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000001700000017; +- *((unsigned long*)& __m128i_op2[0]) = 0x59f7fd8759f7fd87; +- *((unsigned long*)& __m128i_result[1]) = 0xfd200ed2fd370775; +- *((unsigned long*)& __m128i_result[0]) = 0x96198318780e32c5; +- __m128i_out = __lsx_vmsub_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfd200ed2fd370775; +- *((unsigned long*)& __m128d_op0[0]) = 0x96198318780e32c5; +- *((unsigned long*)& __m128d_result[1]) = 0xfd200ed2fd370775; +- *((unsigned long*)& __m128d_result[0]) = 0x8000000000000000; +- __m128d_out = __lsx_vfrint_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op0[0]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op1[1]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_op1[0]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_result[1]) = 0x202544f490f2de35; +- *((unsigned long*)& __m128i_result[0]) = 0x202544f490f2de35; +- __m128i_out = __lsx_vadd_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op0[0]) = 0x001effae001effae; +- unsigned_int_result = 0x000000000000001e; +- unsigned_int_out = __lsx_vpickve2gr_hu(__m128i_op0,0x3); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_du(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_result[0]) = 0x004d004d004d004d; +- __m128i_out = __lsx_vldi(1101); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfd200ed2fd370775; +- *((unsigned long*)& __m128i_op0[0]) = 0x96198318780e32c5; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffe65ecc1be5bc; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffe65ecc1be5bc; +- *((unsigned long*)& __m128i_result[1]) = 0xfe212874311c22b9; +- *((unsigned long*)& __m128i_result[0]) = 0x971a9dbaacf34d09; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x202544f490f2de35; +- *((unsigned long*)& __m128i_op0[0]) = 0x202544f490f2de35; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ffff; +- __m128i_out = 
__lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0040000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0040000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0040000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0040000000000000; +- __m256i_out = __lasx_xvsrlrni_w_d(__m256i_op0,__m256i_op1,0x2a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000021100000211; +- *((unsigned long*)& __m128i_op0[0]) = 0xfb141d31fb141d31; +- *((unsigned long*)& __m128i_op1[1]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op1[0]) = 0x001effae001effae; +- *((unsigned long*)& __m128i_op2[1]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_op2[0]) = 0x2006454690d3de87; +- *((unsigned long*)& __m128i_result[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_result[0]) = 0xbbc8ecc5f3ced5f3; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_result[1]) = 0xd1c0c0a5baf8f8d3; +- *((unsigned long*)& __m128i_result[0]) = 0xecbbbbc5d5f3f3f3; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0x7c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xfffbfffb; +- *((int*)& __m128_op0[2]) = 0xfffbfffb; +- *((int*)& __m128_op0[1]) = 0xfffbfffb; +- *((int*)& __m128_op0[0]) = 0xfffbfffb; +- *((unsigned long*)& __m128i_result[1]) = 0xfffbfffbfffbfffb; +- *((unsigned long*)& __m128i_result[0]) = 0xfffbfffbfffbfffb; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffaefffbffaefffb; +- *((unsigned long*)& __m128i_op1[0]) = 0xffaefffbffaefffb; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0005ffff0005; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000500000004; +- __m128i_out = __lsx_vsubwev_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op0[0]) = 0x5252525252525252; +- *((unsigned long*)& __m128i_op1[1]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_op1[0]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2006454652525252; +- *((unsigned long*)& __m128i_op0[0]) = 0x2006454652525252; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m256d_op0[3]) = 0x0040000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0040000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0040000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0040000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_clt_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_result[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_result[0]) = 0xbbc8ecc5f3ced5f3; +- __m128i_out = __lsx_vsrlri_d(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_op1[1]) = 0xffaefffbffaefffb; +- *((unsigned long*)& __m128i_op1[0]) = 0xffaefffbffaefffb; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffc105d1aa; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffbc19ecca; +- __m128i_out = __lsx_vsubwod_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128d_op0[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff8000000000000; +- __m128d_out = __lsx_vfsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_result[1]) = 0xff80ffa2fff0ff74; +- *((unsigned long*)& __m128i_result[0]) = 0xff76ffd8ffe6ffaa; +- __m128i_out = __lsx_vaddwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd1c0c0a5baf8f8d3; +- *((unsigned long*)& __m128i_op0[0]) = 0xecbbbbc5d5f3f3f3; +- 
*((unsigned long*)& __m128i_op1[1]) = 0xffaefffbffaefffb; +- *((unsigned long*)& __m128i_op1[0]) = 0xffaefffbffaefffb; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000d16fc0a0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ec6abbc0; +- __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x4040404040404040; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- long_int_result = 0xffffffffffffffff; +- long_int_out = __lasx_xvpickve2gr_d(__m256i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffc105d1aa; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbc19ecca; +- *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffff9bffbfb; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffdffdfb; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_op0[0]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256d_op0[0]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffc105d1aa; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffbc19ecca; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff3efa; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff43e6; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_result[1]) = 0x0303030303030303; +- *((unsigned long*)& __m128i_result[0]) = 0x0303030303030303; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xff80ffa2fff0ff74; +- *((unsigned long*)& __m128d_op0[0]) = 0xff76ffd8ffe6ffaa; +- *((unsigned long*)& __m128d_op1[1]) = 0xff80ffa2fff0ff74; +- *((unsigned long*)& __m128d_op1[0]) = 0xff76ffd8ffe6ffaa; +- *((unsigned long*)& __m128d_op2[1]) = 0x0303030303030303; +- *((unsigned long*)& __m128d_op2[0]) = 0x0303030303030303; +- *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vfnmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff80ffa2fff0ff74; +- *((unsigned long*)& __m128i_op0[0]) = 0xff76ffd8ffe6ffaa; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffc105d1aa; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffbc19ecca; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe03ff63ff9bf; +- __m128i_out = __lsx_vsran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_result[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_result[0]) = 0xbbc8ecc5f3ced5f3; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000080; +- __m256i_out = __lasx_vext2xv_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff80ffa2fff0ff74; +- *((unsigned long*)& __m128i_op0[0]) = 0xff76ffd8ffe6ffaa; +- *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; +- *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; +- *((unsigned long*)& __m128i_result[1]) = 0xe01ae8a3fc55dd23; +- *((unsigned long*)& __m128i_result[0]) = 0xdd9ff64ef9daeace; +- __m128i_out = __lsx_vavg_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- unsigned_int_result = 0x00000000ffffffff; +- unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x5); +- *((unsigned long*)& __m128i_op0[1]) = 0x0303030303030303; +- *((unsigned long*)& __m128i_op0[0]) = 0x0303030303030303; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x02f3030303030303; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x06d9090909090909; +- __m128i_out = __lsx_vmul_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x02f3030303030303; +- *((unsigned long*)& __m128i_op1[1]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_op1[0]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x02f3030303100303; +- __m128i_out = __lsx_vfrstpi_b(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) 
= 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffe00000001; +- __m256i_out = __lasx_xvmaddwev_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_op0[0]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_result[1]) = 0x0001340134013401; +- *((unsigned long*)& __m128i_result[0]) = 0x0001340134013401; +- __m128i_out = __lsx_vsrari_d(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0303030303030303; +- *((unsigned long*)& __m128i_op0[0]) = 0x0303030303030303; +- *((unsigned long*)& __m128i_result[1]) = 0x1313131313131313; +- *((unsigned long*)& __m128i_result[0]) = 0x1313131313131313; +- __m128i_out = __lsx_vbitseti_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x030804010d090107; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1313131313131313; +- *((unsigned long*)& __m128i_op1[0]) = 0x1313131313131313; +- *((unsigned long*)& __m128i_result[1]) = 0x0039d21e3229d4e8; +- *((unsigned long*)& __m128i_result[0]) = 0x6d339b4f3b439885; +- __m128i_out = __lsx_vmulwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x06d9090909090909; +- *((unsigned long*)& __m128i_op1[1]) = 0x0039d21e3229d4e8; +- *((unsigned long*)& __m128i_op1[0]) = 0x6d339b4f3b439885; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000db24848; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1313131313131313; +- *((unsigned long*)& __m128i_op0[0]) = 0x1313131313131313; +- *((unsigned long*)& __m128i_op1[1]) = 0x34947b4b11684f92; +- *((unsigned long*)& __m128i_op1[0]) = 0xd73691661e5b68b4; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffff000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000d00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffef; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000c; +- __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffe00000001; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff0100000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff01ff0100000000; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff0100000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff01ff0100000000; +- __m256i_out = __lasx_xvsubwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x06d9090909090909; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x48); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0039d21e3229d4e8; +- *((unsigned long*)& __m128i_op0[0]) = 0x6d339b4f3b439885; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffff000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000d00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffc0000000000000; +- __m128i_out = __lsx_vssrarni_d_q(__m128i_op0,__m128i_op1,0x2e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfffffff000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000d00000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffc0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff00000001; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x34947b4b11684f92; +- *((unsigned long*)& __m128i_op0[0]) = 0xd73691661e5b68b4; +- *((unsigned long*)& __m128i_op1[1]) = 0x000016f303dff6d2; +- *((unsigned long*)& __m128i_op1[0]) = 0x000016f303dff6d2; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x7fffffff00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x34947b4b11684f92; +- *((unsigned long*)& __m128i_result[0]) = 0xee297a731e5c5f86; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; +- __m256i_out = __lasx_xvssrani_b_h(__m256i_op0,__m256i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x05f5e2320605e1e2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x03080401; +- *((int*)& __m128_op0[2]) = 0x0d090107; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; +- __m256i_out = __lasx_xvldi(1820); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x34947b4b11684f92; +- *((unsigned long*)& __m128i_op1[0]) = 0xee297a731e5c5f86; +- *((unsigned long*)& __m128i_result[1]) = 0xff6cffb5ff98ff6e; +- *((unsigned long*)& __m128i_result[0]) = 0xffd7ff8dffa4ff7a; +- __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff6cffb5ff98ff6e; +- *((unsigned long*)& __m128i_op0[0]) = 0xffd7ff8dffa4ff7a; +- *((unsigned long*)& __m128i_op1[1]) = 0x34947b4b11684f92; +- *((unsigned long*)& __m128i_op1[0]) = 0xee297a731e5c5f86; +- *((unsigned long*)& __m128i_op2[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffc0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000868686868686; +- __m128i_out = __lsx_vshuf_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000180; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000007f80; +- __m256i_out = __lasx_xvmadd_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& 
__m256i_op0[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ff1b00e4; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010003; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010081; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010003; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100018080; +- __m256i_out = __lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000868686868686; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1c3fc7; +- *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1c3fc7; +- *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; +- __m256i_out = __lasx_xvsrlr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x000000000000001e; +- *((unsigned long*)& __m128i_result[1]) = 0x1e1e1e1e1e1e1e1e; +- *((unsigned long*)& __m128i_result[0]) = 
0x1e1e1e1e1e1e1e1e; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1c3fc7; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1c3fc7; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; +- __m256i_out = __lasx_xvssrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0xff1cff1c; +- *((int*)& __m256_op1[6]) = 0xff1cff1c; +- *((int*)& __m256_op1[5]) = 0xff1cff1c; +- *((int*)& __m256_op1[4]) = 0xff1cff1c; +- *((int*)& __m256_op1[3]) = 0xff1cff1c; +- *((int*)& __m256_op1[2]) = 0xff1cff1c; +- *((int*)& __m256_op1[1]) = 0xff1cff1c; +- *((int*)& __m256_op1[0]) = 0xff1cff1c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[3]) = 0xffffff1cffffff1c; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff1cffffff1c; +- *((unsigned long*)& __m256i_result[1]) = 0xffffff1cffffff1c; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff1cffffff1c; +- __m256i_out = __lasx_xvexth_w_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000002; +- __m256i_out = __lasx_vext2xv_wu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_d(__m128i_op0,15); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x5252525252525252; +- *((unsigned long*)& __m128d_op0[0]) = 0x5252dcdcdcdcdcdc; +- *((unsigned long*)& __m128d_result[1]) = 0x2d8bf1f8fc7e3f20; +- *((unsigned long*)& __m128d_result[0]) = 0x2d8b24b936d1b24d; +- __m128d_out = __lsx_vfrecip_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ff80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_wu_d(__m256i_op0,__m256i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000001c; +- __m256i_out = __lasx_xvand_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000400000004; +- __m256i_out = __lasx_xvmulwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf8f8372f752402ee; +- *((unsigned long*)& __m128i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffc0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000868686868686; +- *((unsigned long*)& __m128i_op1[1]) = 0x1e1e1e1e1e1e1e1e; +- *((unsigned long*)& __m128i_op1[0]) = 0x1e1e1e1e1e1e1e1e; +- *((unsigned long*)& __m128i_result[1]) = 0x0f0f0f0f0f0f0f0f; +- *((unsigned long*)& __m128i_result[0]) = 0x0f0f525252525252; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_h(__m256i_op0,11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0674c8868a74fc80; +- *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; +- *((unsigned long*)& __m128d_result[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128d_result[0]) = 0xc3818bffe7b7a7b8; +- __m128d_out = __lsx_vffint_d_l(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000400000004; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000400000004; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128d_op0[0]) = 0xc3818bffe7b7a7b8; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = 
__lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f1f1f1f1f1; +- *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1f1f1f1f1; +- __m128i_out = __lsx_vmini_b(__m128i_op0,-15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000400000004; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff1cff18; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff1cff18; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff1cff18; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff1cff18; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1f3f06d4fcba4e98; +- *((unsigned long*)& __m128i_op0[0]) = 0x2e1135681fa8d951; +- *((unsigned long*)& __m128i_op1[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000007d07fffffff; +- __m128i_out = __lsx_vssrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1b00e300e4; +- *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1b00e300e4; +- *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1b00e300e4; +- *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1b00e30100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x002000000020ffff; +- __m256i_out = __lasx_xvsrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned 
long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff1cff1c; +- __m256i_out = __lasx_xvaddwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffff1cff1c; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0xdc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128i_op0[0]) = 0xc3818bffe7b7a7b8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op2[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128i_op2[0]) = 0xc3818bffe7b7a7b8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbsll_v(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x084d1a0907151a3d; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x084d1a0907151a3d; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000007d07fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; +- __m128i_out = __lsx_vssrani_b_h(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000868686868686; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000868600008785; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; +- __m256i_out = __lasx_xvaddwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0674c8868a74fc80; +- *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; +- int_result = 0x00000000090b0906; +- int_out = __lsx_vpickve2gr_w(__m128i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000008686; +- *((unsigned long*)& __m128i_op0[0]) = 0x00008e5680008685; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00007fff7fff8000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000868686868686; +- *((unsigned long*)& __m128i_op1[1]) = 0x0674c8868a74fc80; +- *((unsigned long*)& __m128i_op1[0]) = 0xfdce8003090b0906; +- *((unsigned long*)& __m128i_op2[1]) = 0x0674c8868a74fc80; +- *((unsigned long*)& __m128i_op2[0]) = 0xfdce8003090b0906; +- *((unsigned long*)& __m128i_result[1]) = 0x0029aeaca57d74e6; +- *((unsigned long*)& __m128i_result[0]) = 0xdbe332365392c686; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000b000b000b000b; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000b000b000b000b; +- __m256i_out = __lasx_xvpcnt_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0020000000200000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x002000000020ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000056f64adb9464; +- *((unsigned long*)& __m128i_op1[0]) = 0x29ca096f235819c2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000004399d32; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xff1cff1c; +- *((int*)& __m256_op0[6]) = 0xff1cff1c; +- *((int*)& __m256_op0[5]) = 0xff1cff1c; +- *((int*)& __m256_op0[4]) = 0xff1cff1c; +- *((int*)& __m256_op0[3]) = 0xff1cff1c; +- *((int*)& __m256_op0[2]) = 0xff1cff1c; +- *((int*)& __m256_op0[1]) = 0xff1cff1c; +- *((int*)& __m256_op0[0]) = 0xff1cff1c; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0674c886fcba4e98; +- *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; +- *((unsigned long*)& __m128i_result[1]) = 0x003fffc0ffc0003f; +- *((unsigned long*)& __m128i_result[0]) = 0xffc0ffc0003f003f; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xd3220000d3f20000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8bff0000a7b80000; +- *((unsigned long*)& __m128i_result[1]) = 0x0909000009090000; +- *((unsigned long*)& __m128i_result[0]) = 0x0909000009090000; +- __m128i_out = __lsx_vmini_bu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x000b000b000b000b; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000b000b000b000b; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4399d3221a29d3f2; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0674c886fcba4e98; +- *((unsigned long*)& __m128i_op1[0]) = 0xfdce8003090b0906; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff001a00000000; +- __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0029aeaca57d74e6; +- *((unsigned long*)& __m128i_op0[0]) = 0xdbe332365392c686; +- *((unsigned long*)& __m128i_op1[1]) = 0x000056f64adb9464; +- *((unsigned long*)& __m128i_op1[0]) = 0x29ca096f235819c2; +- *((unsigned long*)& __m128i_result[1]) = 0x002a05a2f059094a; +- *((unsigned long*)& __m128i_result[0]) = 0x05ad3ba576eae048; +- __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0674c886fcba4e98; +- *((unsigned long*)& __m128i_op0[0]) = 0xfdce8003090b0906; +- *((unsigned long*)& __m128i_op1[1]) = 0x003fffc0ffc0003f; +- *((unsigned long*)& __m128i_op1[0]) = 0xffc0ffc0003f003f; +- *((unsigned long*)& __m128i_op2[1]) = 0x002a05a2f059094a; +- *((unsigned long*)& __m128i_op2[0]) = 0x05ad3ba576eae048; +- *((unsigned long*)& __m128i_result[1]) = 0xd4a6cc27d02397ce; +- *((unsigned long*)& __m128i_result[0]) = 0x24b85f887e903abe; +- __m128i_out = __lsx_vmadd_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_result[2]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_result[1]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_result[0]) = 0x6b6b6b6b6b6b6b6b; +- __m256i_out = __lasx_xvori_b(__m256i_op0,0x6b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_op0[2]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_op0[1]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_op0[0]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_op1[3]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_op1[2]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_op1[1]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_op1[0]) = 
0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; +- __m256i_out = __lasx_xvsrlr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff001a00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x003fffc0ffc0003f; +- *((unsigned long*)& __m128i_op1[0]) = 0xffc0ffc0003f003f; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff0000000000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000ff00ff; +- __m128i_out = __lsx_vslt_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[3]) = 0x0017001700176d6d; +- *((unsigned long*)& __m256i_result[2]) = 0x0017001700176d6d; +- *((unsigned long*)& __m256i_result[1]) = 0x0017001700176d6d; +- *((unsigned long*)& __m256i_result[0]) = 0x0017001700176d6d; +- __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0909000009090000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0909000009090000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0909000009090000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0909000009090000; +- *((unsigned long*)& __m128i_op2[1]) = 0x002a05a2f059094a; +- *((unsigned long*)& __m128i_op2[0]) = 0x05ad3ba576eae048; +- *((unsigned long*)& __m128i_result[1]) = 0x0909e0480909e048; +- *((unsigned long*)& __m128i_result[0]) = 0x0909e0480909e048; +- __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; +- *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_result[0]) = 0x00007770ffff941d; +- __m128i_out = __lsx_vaddi_du(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000004000000040; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff9411; +- *((unsigned long*)& __m128i_result[1]) = 0x0000100000001000; +- *((unsigned long*)& __m128i_result[0]) = 0x37b951002d81a921; +- __m128i_out = __lsx_vmulwev_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x003fffc0ffc0003f; +- *((unsigned long*)& __m128i_op0[0]) = 0xffc0ffc0003f003f; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000ffff000; +- *((unsigned long*)& __m128i_result[0]) = 0x000077529b522400; +- __m128i_out = __lsx_vmulwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000b81c8382; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000077af9450; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00007efe7f7f8000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007efe7f7f8000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000b81c8382; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000077af9450; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000077af9450; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x003fffc0; +- *((int*)& __m128_op0[2]) = 0xffc0003f; +- *((int*)& __m128_op0[1]) = 0xffc0ffc0; +- *((int*)& __m128_op0[0]) = 
0x003f003f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000004000000040; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff9411; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; +- *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_result[0]) = 0x000047404f4f040d; +- __m128i_out = __lsx_vbitseli_b(__m128i_op0,__m128i_op1,0x4f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvbitsel_v(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_op1[0]) = 0x000047404f4f040d; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000214f; +- *((unsigned long*)& __m128i_result[0]) = 0xc31b63d846ebc810; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007770ffff941d; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007770ffff941d; +- *((unsigned long*)& __m128i_result[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_result[0]) = 0x00007770ffff941d; +- __m128i_out = __lsx_vfrstpi_h(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; +- __m256i_out = __lasx_xvmadd_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x98); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000100000001000; +- *((unsigned long*)& __m128i_op0[0]) = 0x37b951002d81a921; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000400000004c; +- *((unsigned long*)& __m128i_op1[0]) = 0x000047404f4f040d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000082000000826; +- *((unsigned long*)& __m128i_result[0]) = 0x1b5c4c203e685617; +- __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000100000001000; +- *((unsigned long*)& __m128i_op1[0]) = 0x37b951002d81a921; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000001; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x3e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000214f; +- *((unsigned long*)& __m128i_op0[0]) = 0xc31b63d846ebc810; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ff0000800000ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff941d; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000010a7; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000046ebaa2c; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000b81c8382; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450; +- *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; +- *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1865e65a1; +- __m128i_out = __lsx_vxori_b(__m128i_op0,0xf1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000077af9450; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3ff0000000000000; +- __m128i_out = __lsx_vfrintrp_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) 
= 0x00000000007f7f7f; +- __m128i_out = __lsx_vssrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f7f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x007f007f00007f7f; +- __m128i_out = __lsx_vshuf4i_h(__m128i_op0,0x58); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000010a7; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000046ebaa2c; +- *((unsigned long*)& __m128i_op1[1]) = 0xf1f1f1f149ed7273; +- *((unsigned long*)& __m128i_op1[0]) = 0xf1f1f1f1865e65a1; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000800000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000800000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf1f1f1f149ed7273; +- *((unsigned long*)& __m128i_op0[0]) = 0xf1f1f1f1865e65a1; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffff941d; +- *((unsigned long*)& __m128i_op2[1]) = 0xf1f1f1f149ed7273; +- *((unsigned long*)& __m128i_op2[0]) = 0xf1f1f1f1865e65a1; +- *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; +- *((unsigned long*)& __m128i_result[0]) = 0x78508ad4ec2ffcde; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffdfdc0d; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3ff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffdfdc0d; +- __m128i_out = __lsx_vsrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; +- __m256i_out = __lasx_xvaddwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrz_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; +- __m256i_out = __lasx_xvsat_bu(__m256i_op0,0x6); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf1f1f1f149ed7273; +- *((unsigned long*)& __m128i_op0[0]) = 0x78508ad4ec2ffcde; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffdfdc0d; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x00000000ffdfdc0d; +- *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; +- *((unsigned long*)& __m128i_result[0]) = 0x78508ad4ae70fd87; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; +- __m256i_out = __lasx_xvmsub_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000040; +- __m256i_out = __lasx_xvclz_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000014eb54ab; +- *((unsigned long*)& __m128i_op0[0]) = 0x14eb6a002a406a00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffdfdc0d; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; +- *((unsigned long*)& __m128i_result[0]) = 0x0a753500950fa306; +- __m128i_out = __lsx_vavg_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; +- int_op1 = 0x00000000090b0906; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000090b0906; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000d6d6d; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000014eb54ab; +- *((unsigned long*)& __m128d_op0[0]) = 0x14eb6a002a406a00; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x00007fff7fff8000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x007fffff; +- *((int*)& __m128_op0[1]) = 0x007fffff; +- *((int*)& __m128_op0[0]) = 0xff800000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x007f7f7f; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x380fdfdfc0000000; +- __m128d_out = __lsx_vfcvtl_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vinsgr2vr_h(__m128i_op0,int_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; +- *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; +- *((unsigned long*)& __m128i_result[1]) = 0xe0001fffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x66); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff8000; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; +- __m256i_out = __lasx_xvaddwev_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffff8; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff8000; +- __m256d_out = __lasx_xvfsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x380fdfdfc0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffc7f100004000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000007f7f7f; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffc7f100004000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000c7f14000; +- __m128i_out = __lsx_vssrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x0a752a55; +- *((int*)& __m128_op0[1]) = 0x0a753500; +- *((int*)& __m128_op0[0]) = 0x950fa306; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x380fdfdf; +- *((int*)& __m128_op1[0]) = 0xc0000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000014eb54ab; +- *((unsigned long*)& __m128d_op0[0]) = 0x14eb6a002a406a00; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000014eb54ab; +- *((unsigned long*)& __m128d_op1[0]) = 0x14eb6a002a406a00; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000004000000040; +- __m256i_out = __lasx_xvadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; +- *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff14eb54ab; +- *((unsigned long*)& __m128i_result[0]) = 0x14ea6a002a406a00; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000004000000040; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff14eb54ab; +- *((unsigned long*)& __m128i_op0[0]) = 0x14ea6a002a406a00; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff80008a7555aa; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a7535006af05cf9; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; +- __m128i_out = __lsx_vdiv_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000014eb54ab; +- *((unsigned long*)& __m128i_op0[0]) = 0x14eb6a002a406a00; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff80008a7555aa; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a7535006af05cf9; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff758aaa56; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffa9fb0d07; +- __m128i_out = __lsx_vhsubw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; +- *((unsigned long*)& __m128i_op0[0]) = 0x0a753500950fa306; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff14eb54ab; +- *((unsigned long*)& __m128i_op1[0]) = 0x14ea6a002a406a00; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x00007fff7fff8000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; +- *((unsigned long*)& __m128i_result[0]) = 0x0a753500950fa306; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); 
+- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x68); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000200; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x0a752a55; +- *((int*)& __m128_op0[1]) = 0x0a753500; +- *((int*)& __m128_op0[0]) = 0x950fa306; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x0a752a55; +- *((int*)& __m128_op1[1]) = 0x0a753500; +- *((int*)& __m128_op1[0]) = 0x950fa306; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; +- *((unsigned long*)& __m128i_op0[0]) = 0x0a753500950fa306; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x000000000a752a55; +- *((unsigned long*)& __m128i_op2[0]) = 0x0a753500950fa306; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000a752a55; +- *((unsigned long*)& __m128i_result[0]) = 0x0a753500a9fa0d06; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000a752a55; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a753500a9fa0d06; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xf589caff5605f2fa; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[2]) = 0x00000000090b0906; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100002000; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000090b0906; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000005060503; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000073737; +- __m256i_out = __lasx_xvavgr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000090b0906; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000d6d6d; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000005060503; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000073737; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000050007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000039; +- __m256i_out = __lasx_xvaddwod_h_bu_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000050007; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000039; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100002000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_w_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_b(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000a752a55; +- *((unsigned long*)& __m128i_op0[0]) = 0x0a753500a9fa0d06; +- *((unsigned long*)& __m128i_result[1]) = 0x0d060d060d060d06; +- *((unsigned long*)& __m128i_result[0]) = 0x0d060d060d060d06; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf589caff5605f2fa; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000014eb54ab; +- *((unsigned long*)& __m128i_op1[0]) = 0x14eb6a002a406a00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000eb00ab; +- *((unsigned long*)& __m128i_result[0]) = 0x017400ff004500fa; +- __m128i_out = __lsx_vaddwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvfmin_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_bu(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x0a752a55; +- *((int*)& __m128_op0[1]) = 0x0a753500; +- *((int*)& __m128_op0[0]) = 0xa9fa0d06; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrnel_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,-7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007fff7fff8000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000100000040; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000100002000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffc0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00000000; +- __m256i_out = __lasx_xvhsubw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007fff7fff8000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpermi_w(__m128i_op0,__m128i_op1,0xce); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000014; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000014; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000014; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000014; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xf589caff5605f2fa; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000055; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000054; +- __m256i_out = __lasx_xvmskltz_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; +- __m256i_out = __lasx_xvfrstp_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0d060d060d060d06; +- *((unsigned long*)& __m128i_op0[0]) = 0x0d060d060d060d06; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0d060d060d060d06; +- __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0d060d060d060d06; +- *((unsigned long*)& __m128i_op0[0]) = 0x0d060d060d060d06; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,-11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000200; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvslti_wu(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; +- *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ff8; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; +- *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0a7480007fff8000; +- __m128i_out = __lsx_vssran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x7200000072000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7200000072000000; +- __m256i_out = __lasx_xvldi(-3214); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_result[3]) = 0x721e001e721e001e; +- *((unsigned long*)& __m256i_result[2]) = 0x721e001e721e001e; +- *((unsigned long*)& __m256i_result[1]) = 0x721e001e721e001e; +- *((unsigned long*)& __m256i_result[0]) = 0x721e001e721e001e; +- __m256i_out = __lasx_xvaddi_hu(__m256i_op0,0x1e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000055; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000054; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; +- __m256i_out = __lasx_xvsran_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000a74aa8a55ab; +- *((unsigned long*)& __m128i_op0[0]) = 0x6adeb5dfcb000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrln_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000007000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffff0000; +- __m256i_out = __lasx_xvadd_q(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000003ff8; +- __m128i_out = __lsx_vsat_w(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00003ff8; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0xff800000; +- *((int*)& __m128_result[0]) = 0xc3080000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x0000033a; +- *((int*)& __m128_op0[2]) = 0x0bde0853; +- *((int*)& __m128_op0[1]) = 0x0a960e6b; +- *((int*)& __m128_op0[0]) = 0x0a4f0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzl_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000000; +- *((unsigned 
long*)& __m256i_op1[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_result[2]) = 0x2020000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_result[0]) = 0x2020000000000000; +- __m256i_out = __lasx_xvsrarni_d_q(__m256i_op0,__m256i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff0000; +- __m256i_out = __lasx_xvsubwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256d_op1[2]) = 0x2020000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256d_op1[0]) = 0x2020000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256d_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256d_result[0]) = 0x7fffffffffffffff; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x467fe000; +- __m128_out = __lsx_vffint_s_w(__m128i_op0); +- 
ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0xffffff1dffffff1d; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff1dffffff1d; +- *((unsigned long*)& __m256i_result[1]) = 0xffffff1dffffff1d; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff1dffffff1d; +- __m256i_out = __lasx_xvldi(2845); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001f00000020; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001f00000020; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xff00ffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xff00ffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00ffffffffffffff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x000fffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x000fffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrmh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000467fe000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000003ff8; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000003ff8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000467fef81; +- __m128i_out = __lsx_vmaddwod_h_bu_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000467fef81; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x00000000ffffffff; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x000000ffffff1dff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffff1dffffff1dff; +- *((unsigned long*)& __m256i_op2[1]) = 0x000000ffffff1dff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffff1dffffff1dff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0020; +- *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff0001; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0020; +- *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff0001; +- __m256i_out = __lasx_xvmaddwod_w_hu_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000467fef81; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000013; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ffffff1dff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff1dffffff1dff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ffffff1dff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff1dffffff1dff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff1dffffff1dff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff1dffffff1dff; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_du_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x2aaaaa85aaaaaa85; +- *((unsigned long*)& __m256i_op1[2]) = 0x2aaa48f4aaaa48f4; +- *((unsigned long*)& __m256i_op1[1]) = 0x2aaaaa85aaaaaa85; +- *((unsigned long*)& __m256i_op1[0]) = 
0x2aaa48f4aaaa48f4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff0000ffff; +- __m256i_out = __lasx_xvsle_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000003ff8; +- *((unsigned long*)& __m128d_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128d_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_cle_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff8001ffff0001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x00ff008000ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x00ff008000ff0000; +- __m256i_out = __lasx_xvaddwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00011; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00011; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvmaxi_hu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000003ff8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00011; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00011; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& 
__m256i_result[0]) = 0x7fffffffffffffff; +- __m256i_out = __lasx_xvssrlni_d_q(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff8001ffff0001; +- *((unsigned long*)& __m256i_result[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff8001; +- *((unsigned long*)& __m256i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff8001; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x6e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x41dffc0000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x41dffc0000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x01533b5e7489ae24; +- *((unsigned long*)& __m128i_op0[0]) = 0xe519ab7e71e33848; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x01533b5e7489ae24; +- *((unsigned long*)& __m128i_result[0]) = 0xffffab7e71e33848; +- __m128i_out = __lsx_vextrins_h(__m128i_op0,__m128i_op1,0xbc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff8001ffff8001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffff000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff8001ffff8001; +- *((unsigned long*)& __m256i_result[3]) = 0x7fff800000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffc0017fffc001; +- *((unsigned long*)& __m256i_result[1]) = 0x7fff800000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffc0017fffc001; +- __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff9dff9dff9dff9d; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x01533b5e7489ae24; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffab7e71e33848; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000ffff; +- 
*((unsigned long*)& __m128i_result[0]) = 0x3b5eae24ab7e3848; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003ff8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff9dff9dff9dff9d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffceffceffcf1fcb; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x00000000090b0906; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_b(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_vpickve2gr_h(__m128i_op0,0x3); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhsubw_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000c6c60000c6c6; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000c6c58000c6b2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000c6c40000c6c6; +- *((unsigned long*)& __m128i_result[0]) = 0x8000c6c78000c6b2; +- __m128i_out = __lsx_vbitrevi_d(__m128i_op0,0x21); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000000000000; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x30); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x0000ffff; +- *((int*)& __m128_op0[1]) = 0x3b5eae24; +- *((int*)& __m128_op0[0]) = 0xab7e3848; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- 
*((int*)& __m128_op1[0]) = 0x00003f80; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000003f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x800fffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x800fffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x800fffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x800fffffffffffff; +- __m256i_out = __lasx_xvnor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x01533b5e7489ae24; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffab7e71e33848; +- *((unsigned long*)& __m128d_op1[1]) = 0x01533b5e7489ae24; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffab7e71e33848; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xffffab7e71e33848; +- __m128d_out = __lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffab7e71e33848; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffe1ffffffe1; +- *((unsigned long*)& __m128i_result[0]) = 0xffffab5f71e33829; +- __m128i_out = __lsx_vsubi_wu(__m128i_op0,0x1f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000075dbe982; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000071e48cca; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0ebb7d300e3c9199; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrarni_w_d(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrm_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfbff0000ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfbff0000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x800fffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x800fffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x800fffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x800fffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0020; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff8001ffff0001; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0020; +- *((unsigned 
long*)& __m256i_op0[0]) = 0xffff8001ffff0001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0020; +- *((unsigned long*)& __m256i_result[2]) = 0xffff8001ffff0001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff8001ffff0001; +- __m256i_out = __lasx_xvinsve0_w(__m256i_op0,__m256i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x01533b5e7489ae24; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffab7e71e33848; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff00ff00ff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0xff00ff00ff00ff00; +- __m128i_out = __lsx_vilvh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x01533b5e7489ae24; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffab7e71e33848; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xce9135c49ffff570; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_d_q(__m128i_op0,__m128i_op1,0x23); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000800000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000800000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000800000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000800000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfbff0000ffff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xfbff0000ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfbff0000ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0xff00000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xfbff0000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0xff00000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7f80ffffff808000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7f80ffffff808000; +- __m256i_out = __lasx_xvssrarni_b_h(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwev_q_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_w(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f80ffffff808000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f80ffffff808000; +- *((unsigned long*)& __m256i_result[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f0000007f7fff; +- *((unsigned long*)& __m256i_result[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f0000007f7fff; +- __m256i_out = __lasx_xvabsd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffe00000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x001f001fffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffe0ffe000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x001f001fffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffe0ffe000000000; +- __m256i_out = __lasx_xvilvh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7f80ffffff808000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7f80ffffff808000; +- *((unsigned long*)& __m256i_op1[3]) = 0x001f001fffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffe0ffe000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x001f001fffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffe0ffe000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffe0ffe000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x7fa0001fff808000; +- *((unsigned long*)& __m256i_result[1]) = 0xffe0ffe000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x7fa0001fff808000; +- __m256i_out = __lasx_xvmadd_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vsubwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000004; +- __m128i_out = __lsx_vmaxi_du(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x60600000ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x6060000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x60600000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x6060000000000000; +- __m256i_out = __lasx_xvbitseli_b(__m256i_op0,__m256i_op1,0x60); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x001f001fffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffe0ffe000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x001f001fffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffe0ffe000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_h_bu_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffe0ffe000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fa0001fff808000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffe0ffe000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fa0001fff808000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x007f0000ffffff80; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x007f0000ffffff80; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256d_op1[3]) = 0x001f001fffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffe0ffe000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x001f001fffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffe0ffe000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sle_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- 
*((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_seq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xce9035c49ffff570; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[0]) = 0xce9035c49ffff574; +- __m128i_out = __lsx_vsadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xce9035c49ffff570; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op1[0]) = 0xce9035c49ffff574; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[0]) = 0x00000454ffff9573; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000004; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000004; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000004; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000004; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000004; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000004; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100010001; +- __m128i_out = __lsx_vsubwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000454ffff9573; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000004; +- *((unsigned long*)& __m128i_result[0]) = 0x00000454ffff9573; +- __m128i_out = __lsx_vshuf4i_b(__m128i_op0,0xa4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256d_op0[1]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffe6ffe6e6800001; +- *((unsigned long*)& __m256d_op1[2]) = 0x19660019ff806680; +- 
*((unsigned long*)& __m256d_op1[1]) = 0xffe6ffe6e6800001; +- *((unsigned long*)& __m256d_op1[0]) = 0x19660019ff806680; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00ff0000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00ff0000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00ff0000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00ff0000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,-14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffffffffff00; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ff8000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffffffffff00; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ff8000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000016; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000016; +- __m128i_out = __lsx_vaddi_du(__m128i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_op0[2]) = 0x007f0000ff807f81; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffff800000; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x007f0000ff807f81; +- *((unsigned long*)& __m256i_result[3]) = 0x5d5d5d5d5d22a2a2; +- *((unsigned long*)& __m256i_result[2]) = 0xa2dda2a25d22dd23; +- *((unsigned long*)& __m256i_result[1]) = 0x5d5d5d5d5d22a2a2; +- *((unsigned long*)& __m256i_result[0]) = 0xa2dda2a25d22dd23; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0xa2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_op1[2]) = 0x007f0000ff807f81; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_op1[0]) = 0x007f0000ff807f81; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlni_b_h(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffff8000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffff8000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffff8000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffff8000; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000010; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001000000010; +- __m128i_out = __lsx_vssrlrn_hu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000ff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000ff0000; +- __m256i_out = __lasx_xvexth_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00c00040; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000008000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00c00040; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000008000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_bu_h(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000001000000048; +- *((unsigned long*)& __m128d_result[1]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m128d_result[0]) = 0xc090380000000000; +- __m128d_out = __lsx_vflogb_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000048; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffeffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000016; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xc090380000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000200000000d; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000200000000; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmskltz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x001d001d001d001d; +- *((unsigned long*)& __m128i_result[0]) = 0x001d001d001d001d; +- 
__m128i_out = __lsx_vmaxi_hu(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000200000000d; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000200000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_b(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256i_op1[1]) = 0x41dffbffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ff800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- int_op0 = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvreplgr2vr_w(int_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xc090380000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m128i_op1[0]) = 0xc090380000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffbfffc; +- *((unsigned long*)& __m128i_result[0]) = 0xc090380000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_result[2]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_result[0]) = 
0x1c1c1c1c1c1c1c1c; +- __m256i_out = __lasx_xvmaxi_bu(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001d001d001d001d; +- *((unsigned long*)& __m128i_op0[0]) = 0x001d001d001d0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x001d001d001d001d; +- *((unsigned long*)& __m128i_op1[0]) = 0x001d001d001d0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvilvh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_b_h(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00ff00fffbfffc; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01ff1100000048; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_result[0]) = 0xffffffffffffffff; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; +- __m128i_out = __lsx_vmaxi_h(__m128i_op0,4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x7070545438381c1c; +- *((unsigned long*)& __m256i_result[2]) = 0x7070545438381c1c; +- *((unsigned long*)& __m256i_result[1]) = 0x7070545438381c1c; +- *((unsigned long*)& __m256i_result[0]) = 0x7070545438381c1c; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7070545438381c1c; +- *((unsigned long*)& __m256i_op0[2]) = 0x7070545438381c1c; +- *((unsigned 
long*)& __m256i_op0[1]) = 0x7070545438381c1c; +- *((unsigned long*)& __m256i_op0[0]) = 0x7070545438381c1c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffff00ffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff00ffff8000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff00ffff8000; +- __m256i_out = __lasx_xvmin_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_op0[0]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0004000400040004; +- *((unsigned long*)& __m128i_result[0]) = 0x0004000400040004; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[0]) = 0xff01ff01ff01ff01; +- __m256i_out = __lasx_xvsubwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffff00ffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 
0xffffff00ffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff00007fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff00007fff; +- __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe03fe01fe01fe01; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe3bfa3ffe3bfb21; +- *((unsigned long*)& __m128i_op1[1]) = 0x001d001d001d001d; +- *((unsigned long*)& __m128i_op1[0]) = 0x001d001d001d0000; +- *((unsigned long*)& __m128i_result[1]) = 0x001d001d001d001d; +- *((unsigned long*)& __m128i_result[0]) = 0x001d001d001d0000; +- __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[2]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[1]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_op0[0]) = 0xff01ff01ff01ff01; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001200000012; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrai_d(__m128i_op0,0x3d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000001; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000001; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvftintrph_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_result[3]) = 0x1a1a1a2c1a1a1a2c; +- *((unsigned long*)& __m256i_result[2]) = 0x1a1a1a2c1a1a1a2c; +- *((unsigned long*)& __m256i_result[1]) = 0x1a1a1a2c1a1a1a2c; +- *((unsigned long*)& __m256i_result[0]) = 0x1a1a1a2c1a1a1a2c; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = 
__lsx_vfcmp_sule_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpickod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_b(__m256i_op0,2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21; +- *((unsigned long*)& __m128i_result[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128i_result[0]) = 0xfe03fe3ffe01fa21; +- __m128i_out = __lsx_vfrintrz_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe03fe3ffe01fa21; +- *((unsigned long*)& __m128i_result[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128i_result[0]) = 0xfe03fe3ffe01fa21; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_du(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128i_op2[0]) = 0xfe03fe3ffe01fa21; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmsub_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128i_op0[0]) = 0xfe03fe3ffe01fa21; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128d_op0[0]) = 0xfe03fe3ffe01fa21; +- *((unsigned long*)& __m128d_op1[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128d_op1[0]) = 0xfe03fe3ffe01fa21; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8000000080000000; +- __m128i_out = __lsx_vftintrp_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x1a1a1a2c1a1a1a2c; +- *((unsigned long*)& __m256i_op1[2]) = 0x1a1a1a2c1a1a1a2c; +- *((unsigned long*)& __m256i_op1[1]) = 0x1a1a1a2c1a1a1a2c; +- *((unsigned long*)& __m256i_op1[0]) = 0x1a1a1a2c1a1a1a2c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffff8000; +- __m256d_out = __lasx_xvfadd_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrotr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001200000012; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000001ffff8000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000001ffff8000; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- unsigned_int_result = 0x00000000ffffffff; +- unsigned_int_out = __lasx_xvpickve2gr_wu(__m256i_op0,0x4); +- *((unsigned long*)& __m128i_op0[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff80000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_d_q(__m128i_op0,__m128i_op1,0x60); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfe3bfb01fe3bfe01; +- *((unsigned long*)& __m128i_op1[0]) = 0xfe03fe3ffe01fa21; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vsran_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xfe3bfb01; +- *((int*)& __m128_op0[2]) = 0xfe3bfe01; +- *((int*)& __m128_op0[1]) = 0xfe03fe3f; +- *((int*)& __m128_op0[0]) = 0xfe01fa21; +- *((int*)& __m128_op1[3]) = 0xfe3bfb01; +- *((int*)& __m128_op1[2]) = 0xfe3bfe01; +- *((int*)& __m128_op1[1]) = 0xfe03fe3f; +- *((int*)& __m128_op1[0]) = 0xfe01fa21; +- *((int*)& __m128_op2[3]) = 0x00000000; +- *((int*)& __m128_op2[2]) = 0x00000000; +- *((int*)& __m128_op2[1]) = 0x00000000; +- *((int*)& __m128_op2[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfmadd_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffff00; +- *((unsigned long*)& __m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffffff00; +- __m256i_out = __lasx_xvbitclr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000fe03fe01; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fe01fe01; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000007020701; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000007010701; +- __m128i_out = __lsx_vpcnt_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslei_wu(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffeffffff00; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000100; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000100; +- __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0700f8ff0700f8ff; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000007020701; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000007010701; +- *((unsigned long*)& __m128i_result[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f8000008680f1ff; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff9fffefff9ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0280000000000000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000100; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000100; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0002000200000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0002000200000000; +- __m256i_out = __lasx_xvssrlrn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000008680f1ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xff80ffffff80ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xff80ffff8680f1ff; +- __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000008680f1ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0280000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffffff00000000; +- __m128i_out = __lsx_vsle_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x1c1c1c1c; +- *((int*)& __m256_op0[6]) = 0x1c1c1c1c; +- *((int*)& __m256_op0[5]) = 0xfffffffe; +- *((int*)& __m256_op0[4]) = 0xffffff00; +- *((int*)& __m256_op0[3]) = 0x1c1c1c1c; +- *((int*)& __m256_op0[2]) = 0x1c1c1c1c; +- *((int*)& __m256_op0[1]) = 0xfffffffe; +- *((int*)& __m256_op0[0]) = 0xffffff00; +- *((unsigned long*)& __m256i_result[3]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffeffffff00; +- *((unsigned long*)& __m256i_result[1]) = 0x3f8000003f800000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffeffffff00; +- __m256i_out = __lasx_xvfrintrp_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffc0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffc0; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[2]) = 
0xfffffffeffffff00; +- *((unsigned long*)& __m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffeffffff00; +- *((unsigned long*)& __m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffeffffff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffeffffff00; +- *((unsigned long*)& __m256i_result[3]) = 0x3838383838383838; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffdfffffe00; +- *((unsigned long*)& __m256i_result[1]) = 0x3838383838383838; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffdfffffe00; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0a0000000a000000; +- __m128i_out = __lsx_vldi(-3318); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffff000000; +- __m256i_out = __lasx_xvslli_d(__m256i_op0,0x18); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff80ffff7e02; +- *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0280000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff81ffff7f03; +- *((unsigned long*)& __m128i_result[0]) = 0x04ffff8101ff81ff; +- __m128i_out = __lsx_vbitset_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,-13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff81ffff7f03; +- *((unsigned long*)& __m128i_op0[0]) = 0x04ffff8101ff81ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0a0000001e000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0a000000f6000000; +- __m128i_out = __lsx_vmul_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0002000200000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0002000200000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000020002000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000020002000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x3838383838383838; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffdfffffe00; +- *((unsigned long*)& __m256i_op0[1]) = 0x3838383838383838; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffdfffffe00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffff00000000; +- __m256i_out = __lasx_xvssrarn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff80ff807e017f01; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f3b7f3f7f3b7f21; +- *((unsigned long*)& __m128i_op1[1]) = 0x0a0000001e000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a000000f6000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0980ff8174017f01; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0xff80ff80; +- *((int*)& __m128_op0[2]) = 0x7e017f01; +- *((int*)& __m128_op0[1]) = 0x7f3b7f3f; +- *((int*)& __m128_op0[0]) = 0x7f3b7f21; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vftintrz_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000020002000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000020002000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfff0fff0fff0fc00; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfff0fff0fff0fc00; +- __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff0fff0fff0fc00; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff0fff0fff0fc00; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000f880f87e; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000f880f87e; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000008000; +- __m256i_out = __lasx_xvsrlrni_h_w(__m256i_op0,__m256i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256d_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffff000000; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff80ffff7e02; +- *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xf931fd04f832fe02; +- *((unsigned long*)& __m128i_result[1]) = 0x80007fc000003f00; +- *((unsigned long*)& __m128i_result[0]) = 0x7d187e427c993f80; +- __m128i_out = __lsx_vavg_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0a000000; +- *((int*)& __m128_op0[2]) = 0x0a000000; +- *((int*)& __m128_op0[1]) = 0x0a000000; +- *((int*)& __m128_op0[0]) = 0x0a000000; +- *((int*)& __m128_result[3]) = 0x75000000; +- *((int*)& __m128_result[2]) = 0x75000000; +- *((int*)& __m128_result[1]) = 0x75000000; +- *((int*)& __m128_result[0]) = 0x75000000; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000f880f87e; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000f880f87e; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000008000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; +- __m256i_out = __lasx_xvssub_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000017f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000017f7f7f7f; +- __m256i_out = __lasx_xvssrlrn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0700f8ff0700f8ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0700f8ff0700f8ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) 
= 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_cle_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffff8000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffff8000; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000004000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0280000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7500000075000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7500000075000000; +- *((unsigned long*)& __m128i_result[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_result[0]) = 0x3bc000003a800000; +- __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000017f7f7f7f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000017f7f7f7f; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000017fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000017fff; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80007fc000003f00; +- *((unsigned long*)& __m128i_op0[0]) = 0x7d187e427c993f80; +- *((unsigned long*)& __m128i_op1[1]) = 0x7500000075000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7500000075000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff800000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00007d1800007c99; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff000000017fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff000000017fff; +- __m256i_out = __lasx_xvssrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff000000017fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff000000017fff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsra_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff800000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007d1800007c99; +- *((unsigned long*)& __m128i_op1[1]) = 0x0a0000001e000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a621b3ebe5e1c02; +- *((unsigned long*)& __m128i_result[1]) = 0x04ffc0000f000000; +- *((unsigned long*)& __m128i_result[0]) = 0x05314c2bdf2f4c4e; +- __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000017f7f7f7f; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000017f7f7f7f; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmin_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010080; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7500000075007500; +- *((unsigned long*)& __m128i_op0[0]) = 0x00feff8000ff80ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff800000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007d1800007c99; +- *((unsigned long*)& __m128i_result[1]) = 0x0000f50000007500; +- *((unsigned long*)& __m128i_result[0]) = 0x00007e1600007d98; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000007fffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000fe00fe; +- *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe00fe; +- __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_vext2xv_du_wu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000007500; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007e1600007d98; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000fe00fe; +- *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000f50000fe75fe; +- *((unsigned long*)& __m128i_result[0]) = 0x00fe7efe00fe7dfe; +- __m128i_out = __lsx_vmax_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff7fffffff; +- __m256i_out = __lasx_xvssrln_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010080; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwod_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f7f00007f7f7500; +- *((unsigned long*)& __m128i_op1[0]) = 0x3b42017f3a7f7f01; +- *((unsigned long*)& __m128i_result[1]) = 0x04faf60009f5f092; +- *((unsigned long*)& __m128i_result[0]) = 0x04fafa9200000000; +- __m128i_out = __lsx_vmulwod_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff9fffefff9ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x04faf60009f5f092; +- *((unsigned long*)& __m128i_op1[0]) = 0x04fafa9200000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfc06066e00000000; +- __m128i_out = __lsx_vsigncov_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x04faf60009f5f092; +- *((unsigned long*)& __m128i_op0[0]) = 0x04fafa9200000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xfff9fffefff9ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000004fa000009f5; +- *((unsigned long*)& __m128i_result[0]) = 0x000004f3fffffff9; +- __m128i_out = __lsx_vaddwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4480000044800000; +- *((unsigned long*)& __m128i_result[0]) = 0x45c0000044800000; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000007500; +- *((unsigned long*)& __m128i_op0[0]) = 0x00007e1600007d98; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000f50000000900; +- *((unsigned long*)& __m128i_result[0]) = 0x0000090900000998; +- __m128i_out = __lsx_vmini_b(__m128i_op0,9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff800000; +- *((unsigned long*)& __m256i_result[3]) = 0xffc0000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffc0000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffff0000; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffffffbfffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000800000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffffffbfffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000800000; +- *((unsigned long*)& __m256i_result[3]) = 0x0102020202010202; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0102020202010202; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000010000; +- __m256i_out = __lasx_xvsrlri_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xa9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000fe00fe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe00fe; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000007500; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007e1600007d98; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe7fffffff; +- __m128i_out = __lsx_vssran_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4480000044800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x45c0000044800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x4481000144810001; +- *((unsigned long*)& __m128i_result[0]) = 0x45c04000c4808000; +- __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3fffffff3fffc000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3fffffff3fffc000; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x04faf60009f5f092; +- *((unsigned long*)& __m128i_op0[0]) = 0x04fafa9200000000; +- int_op1 = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x04faf600fff5f092; +- *((unsigned long*)& __m128i_result[0]) = 0x04fafa9200000000; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000100010; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000000900; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000090900000998; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00007a8000000480; +- *((unsigned long*)& __m128i_result[0]) = 0x00000485000004cc; +- __m128i_out = __lsx_vavgr_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; +- *((unsigned 
long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cun_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffff000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000100010; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000010000f; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000010000f; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000090900000998; +- *((unsigned long*)& __m128i_result[1]) = 0xffff00ffffff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_hu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_h(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x3a8100013a810001; +- *((unsigned long*)& __m128i_result[0]) = 0x7bc04000ba808000; +- __m128i_out = __lsx_vbitset_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc; +- *((unsigned long*)& __m128i_op1[1]) = 0x00007a8000000480; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000485000004cc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000f50000000900; +- *((unsigned long*)& __m128i_result[0]) = 0x0000090a00000998; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff3a81ffff89fd; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffb3c3ffff51ba; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0802080408060803; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000090a00000998; +- *((unsigned long*)& __m128i_result[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_result[0]) = 0x000ef0000000003b; +- __m128i_out = __lsx_vsrlr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0xffff00ffffff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000f50000000900; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000090900000998; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff000900ffff98; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff760386bdae46; +- *((unsigned long*)& __m128i_op0[0]) = 0xc1fc7941bc7e00ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0802080408060803; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff000086bd; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ca000000c481; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3bc000003a800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00fe00fe7fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x1d4000001d400000; +- *((unsigned long*)& __m128i_result[0]) = 0x1e5f007f5d400000; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_w(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffff; +- *((int*)& __m256_op0[6]) = 0xffffffff; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0xffffffff; +- *((int*)& __m256_op0[2]) = 0xffffffff; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00100010; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00100010; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00100010; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00100010; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_saf_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff000086bd; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ca000000c481; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslli_h(__m256i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff00ffffff00ff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00ff00ff00ff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00ff000900ffff98; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffff7fffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffff7fffffff; +- __m128i_out = __lsx_vssrlrni_w_d(__m128i_op0,__m128i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000f50000000900; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000090900000998; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x7fffffffffffffff; +- __m128i_out = __lsx_vssrlni_d_q(__m128i_op0,__m128i_op1,0x20); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000003fffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000003fffff; +- __m256i_out = __lasx_xvsat_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff760386bdae46; +- *((unsigned long*)& __m128i_op1[0]) = 0xc1fc7941bc7e00ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffff7603; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0xc3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000003fffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x00000000003fffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvaddwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000ffff7603; +- *((unsigned long*)& __m128d_op1[1]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128d_op1[0]) = 0x7fffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sne_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x45000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x44000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x3cb504f3; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x3d3504f3; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4500000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4400000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff000000ff000000; +- __m128i_out = __lsx_vssrarn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000003fffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000003fffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff010100000001; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff8000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffff80; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffff80; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_b_h(__m256i_op0,__m256i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; 
+- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000ef0000000003b; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_h_w(__m256i_op0,__m256i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0802080408060803; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00001fffe0001fff; +- __m128i_out = __lsx_vsra_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; +- *((int*)& __m128_result[3]) = 0x577fff00; +- *((int*)& __m128_result[2]) = 0x577fff00; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x596f0000; +- __m128_out = __lsx_vffint_s_l(__m128i_op0,__m128i_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffff0101; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffff0101; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0xffff0101; +- *((int*)& __m256_result[4]) = 0x00000001; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0xffff0101; +- *((int*)& __m256_result[0]) = 0x00000001; +- __m256_out = __lasx_xvfadd_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x3fffffff3fffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x3fffffff3fffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000810001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000810001; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000440efffff000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000440efffff000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000003b; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffff0101; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffff0101; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvfcmp_sne_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000440efffff000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000003b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x440ef000440ef000; +- *((unsigned long*)& __m128i_op2[0]) = 0x4400000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000440efffff000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000003b; +- __m128i_out = __lsx_vmaddwev_h_bu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000008000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000008000000080; +- __m256i_out = __lasx_xvssrlrni_wu_d(__m256i_op0,__m256i_op1,0x39); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffff0101; +- *((int*)& __m256_op1[4]) = 0x00000001; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffff0101; +- *((int*)& __m256_op1[0]) = 0x00000001; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x440ef000440ef000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4400000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; +- *((unsigned long*)& __m128i_result[1]) = 0x0f8d33000f8d3300; +- *((unsigned long*)& __m128i_result[0]) = 0x0003b80000000000; +- __m128i_out = __lsx_vmulwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000ffff; +- __m256i_out = __lasx_xvpickev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff2356fe165486; +- *((unsigned long*)& __m128i_op0[0]) = 0x5efeb3165bd7653d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseqi_w(__m128i_op0,5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vseqi_h(__m128i_op0,0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0f8d33000f8d3300; +- *((unsigned long*)& __m128i_op0[0]) = 0x0003b80000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0f8d33000f8d32fd; +- *((unsigned long*)& __m128i_result[0]) = 0x0003b7fffffffffd; +- 
__m128i_out = __lsx_vsubi_du(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000007fff9; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff2356fe165486; +- *((unsigned long*)& __m128i_op1[0]) = 0x5efeb3165bd7653d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000235600005486; +- *((unsigned long*)& __m128i_result[0]) = 0x0000b31600006544; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vneg_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff2356fe165486; +- *((unsigned long*)& __m128i_op1[0]) = 0x5efeb3165bd7653d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff2356fe165486; +- __m128i_out = __lsx_vpackod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffff010100000001; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffff010100000001; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sule_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000235600005486; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000b31600006544; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_wu(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff2356fe165486; +- *((unsigned long*)& __m128i_op1[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000ef0000000003b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000003b0000ffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffff2356fe165486; +- __m128i_out = __lsx_vextrins_w(__m128i_op0,__m128i_op1,0x70); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000ffff; +- *((unsigned long*)& 
__m256i_op1[1]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x50); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff0fffffff0; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,-16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_hu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffff; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefefdfdfdfd; +- *((unsigned long*)& __m256i_result[1]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefefdfdfdfd; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0x26); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvmini_w(__m256i_op0,-1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00003a7fc58074ff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000eeff1100e; +- __m128i_out = __lsx_vmuh_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff2356fe165486; +- *((unsigned long*)& __m128i_op0[0]) = 0x5efeb3165bd7653d; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000007; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000007; +- __m128i_out = __lsx_vmini_du(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3a8000003a800000; +- __m128i_out = __lsx_vexth_q_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffffff0; +- 
*((int*)& __m256_op0[6]) = 0xfffffff0; +- *((int*)& __m256_op0[5]) = 0xfffffff0; +- *((int*)& __m256_op0[4]) = 0xfffffff0; +- *((int*)& __m256_op0[3]) = 0xfffffff0; +- *((int*)& __m256_op0[2]) = 0xfffffff0; +- *((int*)& __m256_op0[1]) = 0xfffffff0; +- *((int*)& __m256_op0[0]) = 0xfffffff0; +- *((unsigned long*)& __m256d_result[3]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256d_result[2]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256d_result[1]) = 0xfffffffe00000000; +- *((unsigned long*)& __m256d_result[0]) = 0xfffffffe00000000; +- __m256d_out = __lasx_xvfcvth_d_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfefefefefdfdfdfd; +- *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfefefefefdfdfdfd; +- *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010202020203; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010201010102; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010202020203; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010201010102; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x3a800000; +- *((int*)& __m128_op0[2]) = 0x3a800000; +- *((int*)& __m128_op0[1]) = 0x000ef000; +- *((int*)& __m128_op0[0]) = 0x0000003b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vftintrp_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0xffffffff; +- *((int*)& __m256_op0[4]) = 0xffffffff; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0xffffffff; +- *((int*)& __m256_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzl_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000feff2356; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fd165486; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000007; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000007; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000246d9755; +- *((unsigned long*)& __m128i_result[0]) = 0x000000002427c2ee; +- __m128i_out = __lsx_vdiv_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000ef0000000003b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000056; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffff86; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000eefff; +- *((unsigned long*)& __m128i_result[0]) = 0xf8e1a03affffe3e2; +- __m128i_out = __lsx_vmulwev_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128d_op0[0]) = 0x000ef0000000003b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; +- *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000eefff; +- *((unsigned long*)& __m128i_op1[0]) = 0xf8e1a03affffe3e2; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000efffefff; +- *((unsigned long*)& __m128i_result[0]) = 0xa03aa03ae3e2e3e2; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffffff00ffffff; +- __m128i_out = __lsx_vslei_b(__m128i_op0,15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; +- unsigned_int_result = 0x00000000000000ff; +- unsigned_int_out = __lsx_vpickve2gr_bu(__m128i_op0,0xc); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000056000056; +- *((unsigned long*)& __m128i_op0[0]) = 0x3a8000003a800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000efffefff; +- *((unsigned long*)& __m128i_op1[0]) = 0xa03aa03ae3e2e3e2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_d_q(__m128i_op0,__m128i_op1,0x75); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffff0fffffff0; +- __m256i_out = __lasx_xvfrintrp_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002427c2ee; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 
0x0000000000000000; +- __m128i_out = __lsx_vsrln_h_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsllwil_h_b(__m256i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010202020203; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010201010102; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010202020203; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010201010102; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffff0fffffff0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0101010202020203; +- *((unsigned long*)& __m256d_op1[2]) = 0x0101010201010102; +- *((unsigned long*)& __m256d_op1[1]) = 0x0101010202020203; +- *((unsigned long*)& __m256d_op1[0]) = 0x0101010201010102; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; +- *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000003e0000003f; +- __m128i_out = __lsx_vsrli_w(__m128i_op0,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010202020203; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010201010102; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010202020203; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010201010102; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0x00ffffff; +- *((int*)& __m128_op0[0]) = 0x00ffffff; +- *((int*)& __m128_op1[3]) = 0x0000feff; +- *((int*)& __m128_op1[2]) = 0x23560000; +- *((int*)& __m128_op1[1]) = 0x0000fd16; +- *((int*)& __m128_op1[0]) = 0x54860000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; +- *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; +- *((unsigned long*)& __m128i_op1[1]) = 0x3a80613fda5dcb4a; +- *((unsigned long*)& __m128i_op1[0]) = 0x93f0b81a914c003b; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000051649b6; +- *((unsigned long*)& __m128i_result[0]) = 0xd2f005e44bb43416; +- __m128i_out = __lsx_vmulwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000051649b6; +- *((unsigned long*)& __m128i_op0[0]) = 0xd2f005e44bb43416; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000003e0000003f; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000051649b6; +- *((unsigned long*)& __m128i_result[0]) = 0x0000003e0000003f; +- __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0x0000000000760151; +- *((unsigned long*)& __m128i_op0[0]) = 0x003e0021009a009a; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000003e2427c2ee; +- *((unsigned long*)& __m128i_result[1]) = 0x00001e5410082727; +- *((unsigned long*)& __m128i_result[0]) = 0x00007f7f00107f7f; +- __m128i_out = __lsx_vssrlrni_b_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000051649b6; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000003e0000003f; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x41945926d8000000; +- __m128d_out = __lsx_vffinth_d_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x41945926d8000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00001e5410082727; +- *((unsigned long*)& __m128i_op1[0]) = 0x00007f7f00107f7f; +- *((unsigned long*)& __m128i_result[1]) = 0x0001001001000080; +- *((unsigned long*)& __m128i_result[0]) = 0x4195d926d8018000; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlni_hu_w(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000f0f0f0f0f0f0; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xff01ff01ff01f010; +- *((unsigned long*)& __m256i_op1[2]) = 0xff01ff01ff01f010; +- *((unsigned long*)& __m256i_op1[1]) = 0xff01ff01ff01f010; +- *((unsigned long*)& __m256i_op1[0]) = 0xff01ff01ff01f010; +- 
*((unsigned long*)& __m256i_result[3]) = 0x000078780000f0f1; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000078780000f0f1; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrlr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x3a80613fda5dcb4a; +- *((unsigned long*)& __m128i_op0[0]) = 0x93f0b81a914c003b; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000feff23560000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000fd1654860000; +- *((unsigned long*)& __m128i_result[1]) = 0x1e242e4d68dc0000; +- *((unsigned long*)& __m128i_result[0]) = 0x2ff8fddb7ae20000; +- __m128i_out = __lsx_vmulwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000eefff; +- *((unsigned long*)& __m128i_op0[0]) = 0xf8e1a03affffe3e2; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002427c2ee; +- *((unsigned long*)& __m128i_result[1]) = 0xf8e10000a03a0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff2427e3e2c2ee; +- __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363abdf16; +- *((unsigned long*)& __m128i_op0[0]) = 0x41f8e08016161198; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000246d9755; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000002427c2ee; +- *((unsigned long*)& __m128i_result[1]) = 0x636363633f3e47c1; +- *((unsigned long*)& __m128i_result[0]) = 0x41f8e080f1ef4eaa; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363abdf16; +- *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198; +- *((unsigned long*)& __m128i_result[1]) = 0x9c9d9b9bbfaa20e9; +- *((unsigned long*)& __m128i_result[0]) = 0xbe081c963e6fee68; +- __m128i_out = __lsx_vsub_q(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrli_d(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaxi_b(__m128i_op0,-16); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001001001000080; +- *((unsigned long*)& __m128i_op0[0]) = 0x4195d926d8018000; +- *((int*)& __m128_result[3]) = 0x33800000; +- *((int*)& __m128_result[2]) = 0x35800000; +- *((int*)& __m128_result[1]) = 0x37800000; +- *((int*)& __m128_result[0]) = 0x37000000; +- __m128_out = __lsx_vfcvth_s_h(__m128i_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff010100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff; +- __m256i_out = __lasx_xvsle_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0fffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_op1[0]) = 0x41957fff7fff7fff; +- *((unsigned long*)& __m128i_result[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_result[0]) = 0xbf6b810181018101; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363abdf16; +- *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000030000; +- __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x63636363; +- *((int*)& __m128_op0[2]) = 0x63abdf16; +- *((int*)& __m128_op0[1]) = 0x41f8e080; +- *((int*)& __m128_op0[0]) = 0x16161198; +- *((unsigned long*)& __m128i_result[1]) = 0x6363636363abdf16; +- *((unsigned long*)& __m128i_result[0]) = 0x420000003f800000; +- __m128i_out = __lsx_vfrintrp_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000080801030000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000080103040000; +- __m128i_out = __lsx_vsrlri_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x6c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000001e001e001e0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000001e001e001e0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffffffffffff; +- __m256i_out = __lasx_xvhaddw_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000feff23560000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000fd1654860000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_du(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000001e001e001e0; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000001e001e001e0; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvaddwev_q_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c9d9b9bbfaa20e9; +- *((unsigned long*)& __m128i_op0[0]) = 0xbe081c963e6fee68; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000feff23560000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000fd1654860000; +- *((unsigned long*)& __m128i_result[1]) = 0x6363636463abdf17; +- *((unsigned long*)& __m128i_result[0]) = 0x41f8e08016161198; +- __m128i_out = __lsx_vabsd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x7fff7fff; +- *((int*)& __m128_op0[2]) = 0x7fff7fff; +- *((int*)& __m128_op0[1]) = 0xbf6b8101; +- *((int*)& __m128_op0[0]) = 0x81018101; +- *((int*)& __m128_op1[3]) = 0xe3636363; +- *((int*)& __m128_op1[2]) = 0x63abdf16; +- *((int*)& __m128_op1[1]) = 0x41f8e080; +- *((int*)& __m128_op1[0]) = 0x16161198; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sor_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x01010101; +- *((int*)& __m256_op0[6]) = 0x01010101; +- *((int*)& __m256_op0[5]) = 0x01010101; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x01010101; +- *((int*)& __m256_op0[2]) = 0x01010101; +- *((int*)& __m256_op0[1]) = 0x01010101; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((int*)& __m256_op1[7]) = 0x000001e0; +- *((int*)& __m256_op1[6]) = 0x01e001e0; +- 
*((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x000001e0; +- *((int*)& __m256_op1[2]) = 0x01e001e0; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636463abdf17; +- *((unsigned long*)& __m128i_op0[0]) = 0x41f8e08016161198; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x6363636463abdf17; +- *((unsigned long*)& __m128i_result[0]) = 0x41f8e08016161198; +- __m128i_out = __lsx_vmadd_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvadda_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x01010101; +- *((int*)& __m256_op0[6]) = 0x01010101; +- *((int*)& __m256_op0[5]) = 0x01010101; +- *((int*)& __m256_op0[4]) = 0x00000001; +- *((int*)& __m256_op0[3]) = 0x01010101; +- *((int*)& __m256_op0[2]) = 0x01010101; +- *((int*)& __m256_op0[1]) = 0x01010101; +- *((int*)& __m256_op0[0]) = 0x00000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x5847b72626ce61ef; +- *((unsigned long*)& __m128d_op0[0]) = 0x110053f401e7cced; +- *((unsigned long*)& __m128i_result[1]) = 0x5847b72626ce61ef; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrz_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000010100000101; +- *((unsigned 
long*)& __m256i_result[1]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_result[0]) = 0x0000010100000101; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000807bf0a1f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000800ecedee68; +- *((unsigned long*)& __m128i_op1[1]) = 0x5847b72626ce61ef; +- *((unsigned long*)& __m128i_op1[0]) = 0x110053f401e7cced; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x5847bf2de5d8816f; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000010100000101; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000010100000101; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000010100000101; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_ceq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x000000000000001e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001e00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_d(__m256i_op0,12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x636363633f3e47c1; +- *((unsigned long*)& __m128i_op0[0]) = 0x41f8e080f1ef4eaa; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000807bf0a1f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000800ecedee68; +- *((unsigned long*)& __m128i_result[1]) = 0x63636b6afe486741; +- *((unsigned long*)& __m128i_result[0]) = 0x41f8e880ffffffff; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000010100000101; +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmod_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x5847b72626ce61ef; +- *((unsigned long*)& __m128i_op0[0]) = 0x110053f401e7cced; +- *((unsigned long*)& __m128i_op1[1]) = 0x5847b72626ce61ef; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0005847b00011005; +- *((unsigned long*)& __m128i_result[0]) = 0x0005847b00000000; +- __m128i_out = __lsx_vsrani_w_d(__m128i_op0,__m128i_op1,0x2c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0005847b00011005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0005847b00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000807bf0a1f80; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000800ecedee68; +- *((unsigned long*)& __m128i_result[1]) = 0x0005840100000005; +- *((unsigned long*)& __m128i_result[0]) = 0x0005847b00000000; +- __m128i_out = __lsx_vsrlr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741; +- *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x63636b6afe486741; +- *((unsigned long*)& __m128i_result[0]) = 0x41f8e880ffffffff; +- __m128i_out = __lsx_vmaxi_d(__m128i_op0,-2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741; +- *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000027; +- __m128i_out = __lsx_vmskltz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x57f160c4a1750eda; +- *((unsigned long*)& __m128i_result[1]) = 0x000002bf8b062000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffd0ba876d000; +- __m128i_out = __lsx_vsllwil_d_w(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000807bf0a1f80; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000800ecedee68; +- *((unsigned long*)& __m128i_op1[1]) = 0x0005840100000005; +- *((unsigned long*)& __m128i_op1[0]) = 0x0005847b00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0001f0a20001cedf; +- *((unsigned long*)& __m128i_result[0]) = 0x0058000000580000; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_h_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0101010101010110; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0101010101010110; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvbitrev_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000002bf8b062000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffd0ba876d000; +- *((unsigned long*)& __m128i_op1[1]) = 0x63636b6afe486741; +- *((unsigned long*)& __m128i_op1[0]) = 0x41f8e880ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000ff110db0; +- *((unsigned long*)& __m128i_result[0]) = 0x41f7be08ffff578a; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrln_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000002bf8b062000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffd0ba876d000; +- *((unsigned long*)& __m128i_op1[1]) = 0xe363636363abdf16; +- *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198; +- *((unsigned long*)& __m128i_op2[1]) = 0x0005840100000005; +- *((unsigned long*)& __m128i_op2[0]) = 0x0005847b00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0004e8f09e99b528; +- *((unsigned long*)& __m128i_result[0]) = 0xcf1225129ad22b6e; +- __m128i_out = __lsx_vmaddwod_q_du(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741; +- *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xe363636363abdf16; +- *((unsigned long*)& __m128i_op1[0]) = 0x41f8e08016161198; +- *((unsigned long*)& __m128i_result[1]) = 0x0000cecd00004657; +- *((unsigned long*)& __m128i_result[0]) = 0x0000c90000011197; +- __m128i_out = __lsx_vaddwev_w_hu_h(__m128i_op0,__m128i_op1); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x63636b6a; +- *((int*)& __m128_op0[2]) = 0xfe486741; +- *((int*)& __m128_op0[1]) = 0x41f8e880; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xe3636363; +- *((int*)& __m128_op1[2]) = 0x63abdf16; +- *((int*)& __m128_op1[1]) = 0x41f8e080; +- *((int*)& __m128_op1[0]) = 0x16161198; +- *((int*)& __m128_op2[3]) = 0x00c27580; +- *((int*)& __m128_op2[2]) = 0x00bccf42; +- *((int*)& __m128_op2[1]) = 0x00a975be; +- *((int*)& __m128_op2[0]) = 0x00accf03; +- *((int*)& __m128_result[3]) = 0xff800000; +- *((int*)& __m128_result[2]) = 0xff800000; +- *((int*)& __m128_result[1]) = 0x4471fb84; +- *((int*)& __m128_result[0]) = 0xffffffff; +- __m128_out = __lsx_vfmsub_s(__m128_op0,__m128_op1,__m128_op2); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x63636b6afe486741; +- *((unsigned long*)& __m128i_op0[0]) = 0x41f8e880ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslti_d(__m128i_op0,-13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0101010101010110; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op1[1]) = 0x0101010101010110; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0005840100000005; +- *((unsigned long*)& __m128i_op0[0]) = 0x0005847b00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x636363633f3e47c1; +- *((unsigned long*)& __m128i_op1[0]) = 0x41f8e080f1ef4eaa; +- *((unsigned long*)& __m128i_result[1]) = 0xa000308000008002; +- *((unsigned long*)& __m128i_result[0]) = 0x0500847b00000000; +- __m128i_out = __lsx_vrotr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa000308000008002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0500847b00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vslti_w(__m128i_op0,7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0101010101010110; +- *((unsigned long*)& __m256i_op2[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op2[1]) = 0x0101010101010110; +- *((unsigned long*)& __m256i_op2[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_d_wu_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000001e00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000f00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavg_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x63636363; +- *((int*)& __m128_op0[2]) = 0x3f3e47c1; +- *((int*)& __m128_op0[1]) = 0x41f8e080; +- *((int*)& __m128_op0[0]) = 0xf1ef4eaa; +- *((int*)& __m128_op1[3]) = 0x0000cecd; +- *((int*)& __m128_op1[2]) = 0x00004657; +- *((int*)& __m128_op1[1]) = 0x0000c900; +- *((int*)& __m128_op1[0]) = 0x00011197; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vfcmp_clt_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x00c2758000bccf42; +- *((unsigned long*)& __m128d_op0[0]) = 0x00a975be00accf03; +- *((unsigned long*)& 
__m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x00000000ffffffff; +- __m128d_out = __lsx_vfnmsub_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000001e00000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0002000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarni_h_w(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x0000000c; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x0000000c; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xa000308000008002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0500847b00000000; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00c2758000bccf42; +- *((unsigned long*)& __m128i_op0[0]) = 0x00a975be00accf03; +- *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42; +- *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03; +- *((unsigned long*)& __m128i_result[1]) = 0x00c2758000bccf42; +- *((unsigned long*)& __m128i_result[0]) = 0x00a975be00accf03; +- __m128i_out = __lsx_vavg_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffbfffffffb; +- __m256i_out = __lasx_xvrotri_h(__m256i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffa7; +- *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42; +- *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03; +- *((unsigned long*)& __m128i_op2[1]) = 0x00c2758000bccf42; +- *((unsigned long*)& __m128i_op2[0]) = 0x00a975be00accf03; +- *((unsigned long*)& __m128i_result[1]) = 0x0000930400008a10; +- *((unsigned long*)& __m128i_result[0]) = 0x00006f9100007337; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000930400008a10; +- *((unsigned long*)& __m128i_op0[0]) = 0x00006f9100007337; +- *((unsigned long*)& __m128i_op1[1]) = 0x00c2758000bccf42; +- *((unsigned long*)& __m128i_op1[0]) = 0x00a975be00accf03; +- *((unsigned long*)& __m128i_result[1]) = 0x00250023001c001d; +- *((unsigned long*)& __m128i_result[0]) = 0x309d2f342a5d2b34; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00c2758000bccf42; +- *((unsigned long*)& __m128i_op0[0]) = 0x00a975be00accf03; +- *((unsigned long*)& __m128i_op1[1]) = 0x00250023001c001d; +- *((unsigned long*)& __m128i_op1[0]) = 0x309d2f342a5d2b34; +- *((unsigned 
long*)& __m128i_result[1]) = 0x00060eb000000006; +- *((unsigned long*)& __m128i_result[0]) = 0x0000075c00000cf0; +- __m128i_out = __lsx_vsrlr_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x0000cecd; +- *((int*)& __m128_op1[2]) = 0x00004657; +- *((int*)& __m128_op1[1]) = 0x0000c900; +- *((int*)& __m128_op1[0]) = 0x00011197; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256d_op0[2]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256d_op0[1]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256d_op0[0]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cueq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffefffffffeff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvavg_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffbfffffffb; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffbfffffffb; +- __m256i_out = __lasx_xvmin_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[1]) = 0x00060eb000000006; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000075c00000cf0; +- *((unsigned long*)& __m128i_result[1]) = 0xfffaf1500000fffa; +- *((unsigned long*)& __m128i_result[0]) = 0x0000f8a40000f310; +- __m128i_out = __lsx_vsub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000cecd00004657; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000c90000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00019d9a00008cae; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vpackod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00250023001c001d; +- *((unsigned long*)& __m128i_op0[0]) = 0x309d2f342a5d2b34; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_du(__m128i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000f00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000700000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsat_wu(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000f00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 
0x0000000f00000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsra_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_q_du_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000000; +- __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffaf1500000fffa; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a40000f310; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000003e2; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x26); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xe4e4e4e4e4e4e4e4; +- *((unsigned long*)& __m256i_result[2]) = 0xe4e4e4e4e4e4e4e4; +- *((unsigned long*)& __m256i_result[1]) = 0xe4e4e4e4e4e4e4e4; +- *((unsigned long*)& __m256i_result[0]) = 0xe4e4e4e4e4e4e4e4; +- __m256i_out = __lasx_xvsubi_bu(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000cecd00004657; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000c90000011197; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000200000800000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100800000; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000002000000000; +- __m128i_out = __lsx_vbitseti_d(__m128i_op0,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0008000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0008000000000000; +- __m256i_out = __lasx_xvsrlri_h(__m256i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_b(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000000c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseqi_w(__m256i_op0,9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffffffb; +- *((int*)& __m256_op0[6]) = 0xfffffffb; +- *((int*)& __m256_op0[5]) = 0xfffffffb; +- *((int*)& __m256_op0[4]) = 0xfffffffb; +- *((int*)& __m256_op0[3]) = 0xfffffffb; +- *((int*)& __m256_op0[2]) = 0xfffffffb; +- *((int*)& __m256_op0[1]) 
= 0xfffffffb; +- *((int*)& __m256_op0[0]) = 0xfffffffb; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; +- __m256i_out = __lasx_xvfclass_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000003e2; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000003ffe2; +- __m128i_out = __lsx_vexth_h_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffffffb; +- *((int*)& __m256_op0[6]) = 0xfffffffb; +- *((int*)& __m256_op0[5]) = 0xfffffffb; +- *((int*)& __m256_op0[4]) = 0xfffffffb; +- *((int*)& __m256_op0[3]) = 0xfffffffb; +- *((int*)& __m256_op0[2]) = 0xfffffffb; +- *((int*)& __m256_op0[1]) = 0xfffffffb; +- *((int*)& __m256_op0[0]) = 0xfffffffb; +- *((int*)& __m256_op1[7]) = 0x0000ffff; +- *((int*)& __m256_op1[6]) = 0x0001000e; +- *((int*)& __m256_op1[5]) = 0x0000ffff; +- *((int*)& __m256_op1[4]) = 0x0000ffff; +- *((int*)& __m256_op1[3]) = 0x0000ffff; +- *((int*)& __m256_op1[2]) = 0x0000ffff; +- *((int*)& __m256_op1[1]) = 0x0000ffff; +- *((int*)& __m256_op1[0]) = 0x0000ffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cule_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvmuh_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x003fffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x003fffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xc7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrani_h_w(__m256i_op0,__m256i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x003fffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x003fffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x001fffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x001fffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) 
= 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_op2[7]) = 0x001fffff; +- *((int*)& __m256_op2[6]) = 0xffffffff; +- *((int*)& __m256_op2[5]) = 0xffffffff; +- *((int*)& __m256_op2[4]) = 0xffffffff; +- *((int*)& __m256_op2[3]) = 0x001fffff; +- *((int*)& __m256_op2[2]) = 0xffffffff; +- *((int*)& __m256_op2[1]) = 0xffffffff; +- *((int*)& __m256_op2[0]) = 0xffffffff; +- *((int*)& __m256_result[7]) = 0x001fffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0x001fffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000003e2; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x00050eb00000fffa; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000f8a50000f310; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0000ffff0000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_d_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000001; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& 
__m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cle_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x007fffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x007fffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x003fffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x003fffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvavg_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000f1384; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000004ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vssrlrni_bu_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_b(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000001000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000001000000000; +- __m128i_out = __lsx_vavgr_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvilvl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_du_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000004ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; +- __m128i_out = __lsx_vdiv_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000200000002; +- __m256i_out = __lasx_xvmod_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; +- int_op1 = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = 
__lasx_xvreplve_b(__m256i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000001000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000001000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000002000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vslt_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x003fffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x003fffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_q_du(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x0667ae56; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000020; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftinth_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000001fffd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000001fffd; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000700020004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000700020004; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fffe0002; +- __m128i_out = __lsx_vmuh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_hu(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000020; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsrar_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000667ae56; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000000004ff; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; +- __m128i_out = __lsx_vbitclr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffe0002; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000667ae56; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000020; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000667ae56; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000020; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0040000000000003; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0040000000000003; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[3]) = 0x0020000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0020000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvavg_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0040000000000003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0040000000000003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvsubwev_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000003e2; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvdiv_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvhaddw_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 
0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vsrai_h(__m128i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x7ff0000000000000; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; +- __m256i_out = __lasx_xvdiv_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000700020004; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000700020004; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op2[3]) = 0x0040000000000003; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_op2[1]) = 0x0040000000000003; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000003; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[2]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[0]) = 0x000000070002000a; +- __m256i_out = 
__lasx_xvmaddwev_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_w(__m128i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x000000060002000a; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x000000060002000a; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0040000000000003; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_op1[1]) = 0x0040000000000003; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000007; +- *((unsigned long*)& __m256i_result[3]) = 0xffbffffffffffffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m256i_result[1]) = 0xffbffffffffffffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffa; +- __m256i_out = __lasx_xvorn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000fffe0001; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff0001fffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff0000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff0000ffff; +- __m128i_out = __lsx_vavg_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffff00ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vmin_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned 
long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffffd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffffe; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[2]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_result[0]) = 0x000000070002000a; +- __m256i_out = __lasx_xvmax_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffbffffffffffffe; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m256i_op1[1]) = 0xffbffffffffffffe; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrarn_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffff00ff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffff0000; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0x0000ffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sule_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vilvh_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000700000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x60); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffff00ff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000000a; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000a; +- __m256i_out = __lasx_xvmulwev_d_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x7f800000; +- *((int*)& __m128_result[0]) = 0x7f800000; +- __m128_out = __lsx_vfrecip_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffffe; +- __m128i_out = __lsx_vaddwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001fffe00014b41; +- *((unsigned long*)& __m128i_op1[0]) = 0x0001fffe0001ffde; +- *((unsigned long*)& __m128i_result[1]) = 0xffff0002ffffb4bf; +- *((unsigned long*)& __m128i_result[0]) = 0xffff0002ffff0022; +- __m128i_out = __lsx_vssub_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000600000006; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000070002000a; +- *((unsigned long*)& __m256i_op1[3]) = 0x001fffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x001fffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- 
__m256i_out = __lasx_xvsat_d(__m256i_op0,0x32); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000700000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000700000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000700000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000700000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000007; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000020000000b; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000007; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000020000000a; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x000000000000000a; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x000000000000000a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000032; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000032; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffce; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffce; +- __m256i_out = __lasx_xvneg_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001fffe00014b41; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001ffde; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000100020002; +- __m128i_out = __lsx_vssrlrn_bu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f8100017f810001; +- *((unsigned long*)& __m128i_result[0]) = 0x7f8100017f810001; +- __m128i_out = __lsx_vbitrev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- 
*((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrml_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7f8100017f810001; +- *((unsigned long*)& __m128d_op0[0]) = 0x7f8100017f810001; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_clt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000ffffffce; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000ffffffce; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000700000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000700000000; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x8000000000000000; +- __m256d_out = __lasx_xvfnmsub_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f8000007f800000; +- __m128i_out = __lsx_vsra_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrln_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0080000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[1]) = 0x0080000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0080000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0080000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000000000000a; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000000000000a; +- *((unsigned long*)& __m256i_result[3]) = 0x0040000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[1]) = 0x0040000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000005; +- __m256i_out = __lasx_xvavgr_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f8000017f800001; +- *((unsigned long*)& __m128i_result[0]) = 0x7f8000017f800001; +- __m128i_out = __lsx_vbitset_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x80000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x80000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x80000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x80000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_sult_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0040000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0040000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000a000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000a000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x001fffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x001fffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0080000000000000; +- *((unsigned long*)& 
__m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0080000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00020001; +- *((int*)& __m128_op0[0]) = 0x00020002; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x7f800000; +- *((int*)& __m128_result[1]) = 0x607fffc0; +- *((int*)& __m128_result[0]) = 0x607fff80; +- __m128_out = __lsx_vfrsqrt_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000017f800001; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f8000017f800001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000007f800001; +- *((unsigned long*)& __m128i_result[0]) = 0x000000007f800001; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000100020002; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000100020002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000100020002; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000ffffffce; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 
0x0000000000000000; +- __m256i_out = __lasx_xvmax_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrp_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vrotri_d(__m128i_op0,0x21); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f7f00007f7f0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f7f80807f7f8080; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000fffe0000fffe; +- *((unsigned long*)& __m128i_op2[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op2[0]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[0]) = 0x0808080808080808; +- __m256i_out = __lasx_xvclz_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvneg_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrln_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((int*)& __m128_result[3]) = 0x7f800000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vssrln_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[0]) = 0x6363636463636363; +- __m128i_out = __lsx_vadda_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000007ffffffce; +- 
*((unsigned long*)& __m256d_op0[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256d_result[3]) = 0x606a20bd700e59a3; +- *((unsigned long*)& __m256d_result[2]) = 0x6066a09e66c5f1bb; +- *((unsigned long*)& __m256d_result[1]) = 0x606a20bd700e59a3; +- *((unsigned long*)& __m256d_result[0]) = 0x6066a09e66c5f1bb; +- __m256d_out = __lasx_xvfrsqrt_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op0[0]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmuh_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffce; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffce; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x6b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000e2e36363; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000063636363; +- __m128i_out = __lsx_vhaddw_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpackod_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvxor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x317fce80317fce80; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000500020002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000700020033; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000500020002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000700020033; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000500020002; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000700020033; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000500020002; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000700020033; +- *((unsigned long*)& __m256i_result[3]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1400080008000000; +- __m256i_out = __lasx_xvssrarni_d_q(__m256i_op0,__m256i_op1,0x26); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000ff00; +- __m128i_out = __lsx_vmsknz_b(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sor_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa2e3a36363636363; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xa2e3a36463636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f8000007f800000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000a2e300006363; +- *((unsigned long*)& __m128i_result[0]) = 0x0000a2e300006363; +- __m128i_out = __lsx_vhsubw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_result[1]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1400080008000000; +- __m256i_out = __lasx_xvsadd_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xf0000000f0000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslli_h(__m128i_op0,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x317fce80317fce80; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000fffe0000fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffff0000ffff; +- __m128i_out = __lsx_vslt_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f80000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000ff00; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000400000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000007f80; +- __m128i_out = __lsx_vavgr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f80000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7f80000000000007; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000700000007; +- __m128i_out = __lsx_vmaxi_w(__m128i_op0,7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x317fce80317fce80; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmul_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[2]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[1]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_op1[0]) = 0x0808080808080808; +- *((unsigned long*)& __m256i_result[3]) = 0x0807f7f80807f7f8; +- *((unsigned long*)& __m256i_result[2]) = 0x0807f7f80807f7f8; +- *((unsigned long*)& __m256i_result[1]) = 0x0807f7f80807f7f8; +- *((unsigned long*)& __m256i_result[0]) = 0x0807f7f80807f7f8; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x1400080008000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x1400080008000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x1400080008000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x1400080008000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmul_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000501ffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000701ffffce; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000501ffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000701ffffce; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) 
= 0x0000000000000000; +- __m256i_out = __lasx_xvseq_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsubwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x000000080000000b; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000000b; +- __m128i_out = __lsx_vaddi_du(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa2e3a36363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0xa2e3a36463636363; +- *((unsigned long*)& __m128i_op1[1]) = 0x7f80000000000007; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000700000007; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000e32c50e; +- *((unsigned long*)& __m128i_result[0]) = 0xf2b2ce330e32c50e; +- __m128i_out = __lsx_vdiv_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x1400080008000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsll_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, 
__m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmini_bu(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x7f800000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000008; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslt_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmin_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvrepl128vei_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffe8440000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffe8440000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xffffffffe8440000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xffffffffe8440000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffe8440000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffe8440000; +- __m256i_out = __lasx_xvmaddwev_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 
0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwev_q_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000014; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000014; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000014; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000014; +- __m256i_out = __lasx_xvmaxi_du(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslt_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsigncov_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrm_w_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) 
= 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[0]) = 0x0000020000000200; +- __m256i_out = __lasx_xvfclass_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffe0001fffe0001; +- __m128i_out = __lsx_vmulwod_w_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfcvth_d_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsle_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x1); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00ff00ff00ff00ff; +- __m128i_out = __lsx_vhsubw_hu_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmaddwod_w_hu_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvbitclri_w(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xf5fffc00; +- *((int*)& __m256_op0[6]) = 0xfc000000; +- *((int*)& __m256_op0[5]) = 0xf5fffc00; +- *((int*)& __m256_op0[4]) = 0xfc000000; +- *((int*)& __m256_op0[3]) = 0xf5fffc00; +- *((int*)& __m256_op0[2]) = 0xfc000000; +- *((int*)& __m256_op0[1]) = 0xf5fffc00; +- *((int*)& __m256_op0[0]) = 0xfc000000; +- *((unsigned long*)& __m256i_result[3]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_result[2]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_result[1]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_result[0]) = 0xf5fffc00fc000000; +- __m256i_out = __lasx_xvfrintrz_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x4f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000020000000200; +- *((unsigned long*)& 
__m256i_op1[0]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe00; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe00; +- __m256i_out = __lasx_xvsubwod_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x4f4f4f4f; +- *((int*)& __m128_op0[2]) = 0x4f4f4f4f; +- *((int*)& __m128_op0[1]) = 0x4f4f4f4f; +- *((int*)& __m128_op0[0]) = 0x4f4f4f4f; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000cf4f4f00; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000cf4f4f00; +- __m128i_out = __lsx_vftintrzh_l_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000000cf4f4f00; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000cf4f4f00; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_q_du_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vadd_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_bu(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslt_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vdiv_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfcmp_caf_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrneh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x8080808080808080; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff80ffffff80ff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000018080807f; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001ffff80fe; +- __m128i_out = __lsx_vaddwev_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_vext2xv_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0xffffffffffff8a35; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vfcmp_slt_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- unsigned_long_int_result = 0x0000000000000000; +- unsigned_long_int_out = __lasx_xvpickve2gr_du(__m256i_op0,0x3); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffed; +- __m256i_out = __lasx_xvsubi_du(__m256i_op0,0x13); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_b_h(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cult_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[1]) = 
0x00000005ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffce; +- __m256i_out = __lasx_xvavgr_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[3]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrlrn_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; +- __m128i_out = __lsx_vmaddwod_d_wu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_cune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000020000000200; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op2[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op2[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op2[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op2[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0x0000020000010201; +- *((unsigned long*)& __m256i_result[2]) = 0x0000020000010201; +- *((unsigned long*)& __m256i_result[1]) = 0x0000020000010201; +- *((unsigned long*)& __m256i_result[0]) = 0x0000020000010201; +- __m256i_out = __lasx_xvmaddwod_h_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000ffffffed; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000ffffffed; +- __m256i_out = __lasx_xvilvl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffe7ffffffe7; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f4f00004f4f0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f4f00004f4f0000; +- unsigned_int_result = 0x000000004f4f0000; +- unsigned_int_out = __lsx_vpickve2gr_wu(__m128i_op0,0x0); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = 
__lasx_xvbitrev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; +- __m128i_out = __lsx_vsub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x9c83e21a22001818; +- *((unsigned long*)& __m128i_op1[0]) = 0xdd3b8b02563b2d7b; +- *((unsigned long*)& __m128i_op2[1]) = 0x000000009c83e21a; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000022001818; +- *((unsigned long*)& __m128i_result[1]) = 0xf2c97aaa7d8fa270; +- *((unsigned long*)& __m128i_result[0]) = 0x0b73e427f7cfcb88; +- __m128i_out = __lsx_vmaddwev_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x7f7f7f007f7f7f00; +- __m128i_out = __lsx_vssran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x9c83e21a22001818; +- *((unsigned long*)& __m128d_op0[0]) = 0xdd3b8b02563b2d7b; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x7f7f7f007f7f7f00; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x7f7f7f007f7f7f00; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op1[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000001c; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000001de; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000001c; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001de; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; 
+- *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000060000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000060000000; +- __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x44); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf5fffc00fc000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0001001900010019; +- *((unsigned long*)& __m256i_op1[2]) = 0x0a02041904010019; +- *((unsigned long*)& __m256i_op1[1]) = 0x0001001900010019; +- *((unsigned long*)& __m256i_op1[0]) = 0x0a02041904010019; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000007b007e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007b007e; +- __m256i_out = __lasx_xvssrlrn_hu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffed; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffeffed; +- __m256i_out = __lasx_xvbitrevi_d(__m256i_op0,0x10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000007b007e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000007b007e; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffe700000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffe7007b007e; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffe700000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffe7007b007e; +- __m256i_out = __lasx_xvpackev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256d_result[3]) = 0xc039000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0xc039000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0xc039000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0xc039000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000009c83e21a; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000022001818; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff00000000; +- __m128i_out = __lsx_vslti_hu(__m128i_op0,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000400000004000; +- __m256i_out = __lasx_xvsrari_w(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffffffeffed; +- *((unsigned long*)& __m256i_op2[3]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op2[1]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xbf3ffffffffeffed; +- *((unsigned long*)& __m256i_result[2]) = 0xbf3ffffffffeffed; +- *((unsigned long*)& __m256i_result[1]) = 0xbf3ffffffffeffed; +- *((unsigned long*)& __m256i_result[0]) = 0xbf3ffffffffeffed; +- __m256i_out = __lasx_xvmaddwod_h_bu(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xc03b000200020002; +- *((unsigned long*)& __m256i_result[2]) = 0xc03b000200020002; +- *((unsigned long*)& __m256i_result[1]) = 0xc03b000200020002; +- *((unsigned long*)& __m256i_result[0]) = 0xc03b000200020002; +- __m256i_out = __lasx_xvbitrevi_h(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7f7f7f007f7f7f00; +- *((unsigned long*)& __m128i_op1[1]) = 0xf2c97aaa7d8fa270; +- *((unsigned long*)& __m128i_op1[0]) = 0x0b73e427f7cfcb88; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffb1fb1000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xf2c97aaa7d8fa270; +- *((unsigned long*)& __m128i_op1[0]) = 0x0b73e427f7cfcb88; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarni_w_d(__m128i_op0,__m128i_op1,0x3f); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x9c83e21a22001818; +- *((unsigned long*)& __m128i_op0[0]) = 0xdd3b8b02563b2d7b; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128i_result[0]) = 0x00012c8a0000a58a; +- __m128i_out = __lsx_vhaddw_wu_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000007b00f9007e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000007b00f9007e; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000007b00f9007e; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000007b00f9007e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000000f601f200fc; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000000f601f200fc; +- __m256i_out = __lasx_xvsadd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffe7ffffffe7; +- *((unsigned long*)& __m256i_op1[3]) = 0xbf3ffffffffeffed; +- *((unsigned long*)& __m256i_op1[2]) = 0xbf3ffffffffeffed; +- *((unsigned long*)& __m256i_op1[1]) = 0xbf3ffffffffeffed; +- *((unsigned long*)& __m256i_op1[0]) = 0xbf3ffffffffeffed; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000009c83e21a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000022001818; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000e21a00001818; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x0000000000000000; +- __m256d_out = __lasx_xvffinth_d_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000400000004000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000400000004000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000400000004000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000100; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000100; +- __m256i_out = __lasx_xvfclass_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x000000009c83e21a; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000022001818; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftint_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf2c97aaa7d8fa270; +- *((unsigned long*)& __m128i_op0[0]) = 0x0b73e427f7cfcb88; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a; +- *((unsigned long*)& __m128i_result[1]) = 0xf654ad7447e59090; +- *((unsigned long*)& __m128i_result[0]) = 0x27b1b106b8145f50; +- __m128i_out = __lsx_vmulwev_w_hu_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xf654ad7447e59090; +- *((unsigned long*)& __m128i_op1[0]) = 0x27b1b106b8145f50; +- *((unsigned long*)& __m128i_result[1]) = 0x0a545374471b7070; +- *((unsigned long*)& __m128i_result[0]) = 0x274f4f0648145f50; +- __m128i_out = __lsx_vabsd_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0a545374471b7070; +- *((unsigned long*)& __m128i_op0[0]) = 0x274f4f0648145f50; +- *((unsigned long*)& __m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[1]) = 0xa8a736e19e9e28bf; +- *((unsigned long*)& __m128i_result[0]) = 0x9e9f9e9f9e9f9e9f; +- __m128i_out = __lsx_vsrarni_h_w(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000400000004000; +- __m256i_out = __lasx_xvsllwil_w_h(__m256i_op0,0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 
0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; +- __m128i_out = __lsx_vpickev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128i_result[0]) = 0x00012c8a0000a58a; +- __m128i_out = __lsx_vmax_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000007b007e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000007b007e; +- *((unsigned long*)& __m256i_op1[3]) = 0xc03b000200020002; +- *((unsigned long*)& __m256i_op1[2]) = 0xc03b000200020002; +- *((unsigned long*)& __m256i_op1[1]) = 0xc03b000200020002; +- *((unsigned long*)& __m256i_op1[0]) = 0xc03b000200020002; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000001ec020; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000001ec020; +- __m256i_out = __lasx_xvssrarn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; +- *((unsigned long*)& __m128i_result[1]) = 0x09e009e009e009e0; +- *((unsigned long*)& __m128i_result[0]) = 0x09e009e009e009e0; +- __m128i_out = __lsx_vsllwil_h_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xa8a74bff9e9e0070; +- *((unsigned long*)& __m128i_op0[0]) = 0x9e9e72ff9e9ff9ff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; +- __m128i_out = __lsx_vsat_du(__m128i_op0,0x2f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op1[3]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0xc039000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf654ad7447e59090; +- *((unsigned long*)& __m128i_op0[0]) = 0x27b1b106b8145f50; +- *((unsigned long*)& __m128i_result[1]) = 0x000000120000000d; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000e0000000e; +- __m128i_out = 
__lsx_vpcnt_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000ffffffffffff; +- __m128i_out = __lsx_vsrl_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000120000000d; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e; +- unsigned_long_int_result = 0x0000000e0000000e; +- unsigned_long_int_out = __lsx_vpickve2gr_du(__m128i_op0,0x0); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op2[1]) = 0x000000120000000d; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000cfffffff2; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000dfffffff1; +- __m128i_out = __lsx_vmaddwev_d_wu_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0001000c; +- *((int*)& __m128_op0[2]) = 0xfffffff2; +- *((int*)& __m128_op0[1]) = 0x0001000d; +- *((int*)& __m128_op0[0]) = 0xfffffff1; +- *((int*)& __m128_op1[3]) = 0xffff8a17; +- *((int*)& __m128_op1[2]) = 0xffffc758; +- *((int*)& __m128_op1[1]) = 0xffff69bb; +- *((int*)& __m128_op1[0]) = 0xffffad3b; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_sne_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000120000000d; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000011ffee; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000dfff2; +- __m128i_out = __lsx_vmaddwod_d_w(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000120000000d; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000e0000000e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000001; +- __m128i_out = __lsx_vsat_bu(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000007b007e; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000007b007e; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000007b007e; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000007b007e; +- __m256i_out = __lasx_xvmaxi_d(__m256i_op0,2); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x00000007ffffffce; +- __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000000011ffee; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000dfff2; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xf654ad7447e59090; +- *((unsigned long*)& __m128i_op1[0]) = 0x27b1b106b8145f50; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffb81a6f70; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000047eba0b0; +- __m128i_out = __lsx_vsubwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256d_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00000000000000e7; +- *((unsigned long*)& __m256d_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256d_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cne_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128i_op0[0]) = 0x00012c8a0000a58a; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffb81a6f70; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000d48eaa1a2; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffb81ae0bf; +- *((unsigned long*)& __m128i_result[0]) = 0x00012c9748eaffff; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000001de2dc20; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000001de2dc20; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128d_op0[0]) = 0x00012c8a0000a58a; +- *((unsigned long*)& __m128d_op1[1]) = 0xf654ad7447e59090; +- *((unsigned long*)& __m128d_op1[0]) = 0x27b1b106b8145f50; +- *((unsigned long*)& __m128d_result[1]) = 0xf654ad7447e59090; +- *((unsigned long*)& __m128d_result[0]) = 0x27b1b106b8145f50; +- __m128d_out = __lsx_vfadd_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0xffffffe7; +- *((int*)& __m256_op0[6]) = 0xffffffe7; +- *((int*)& __m256_op0[5]) = 0xffffffe7; +- *((int*)& __m256_op0[4]) = 0xffffffe7; +- *((int*)& __m256_op0[3]) = 0xffffffe7; +- *((int*)& __m256_op0[2]) = 0xffffffe7; +- *((int*)& __m256_op0[1]) = 0xffffffe7; +- *((int*)& __m256_op0[0]) = 0xffffffe7; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfmin_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000500000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000700000032; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000500000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000700000032; +- __m256i_out = __lasx_xvadda_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000040e7; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000004000; +- *((unsigned long*)& __m256i_op1[0]) = 
0x00000000000040e7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000200000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000200000000000; +- __m256i_out = __lasx_xvsrani_d_q(__m256i_op0,__m256i_op1,0x21); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_h_w(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000000ff; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000011ffee; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000dfff2; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000000000ff; +- __m128i_out = __lsx_vaddwod_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf654ad7447e59090; +- *((unsigned long*)& __m128i_op0[0]) = 0x27b1b106b8145f50; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_wu_d(__m128i_op0,__m128i_op1,0x3f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; +- 
*((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000010000; +- __m128i_out = __lsx_vpickod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000000e7; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000000001fe; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000001ce; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000000001fe; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000001ce; +- __m256i_out = __lasx_xvaddwev_w_hu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000001fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000001ce; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000001fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001ce; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000000001fd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000000001fd; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00003fff00003fff; +- __m256i_out = __lasx_xvsrli_w(__m256i_op0,0x12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090; +- *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_result[1]) = 
0xf6548a1747e59090; +- *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8145f50; +- __m128i_out = __lsx_vmax_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000005; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000400000003ffb; +- *((unsigned long*)& __m256i_result[2]) = 0x0000400100004001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000400000003ffb; +- *((unsigned long*)& __m256i_result[0]) = 0x0000400100004001; +- __m256i_out = __lasx_xvabsd_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000019001c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000019001c; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001fe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001fe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpermi_q(__m256i_op0,__m256i_op1,0xb9); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128d_op0[0]) = 0x00012c8a0000a58a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslei_du(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090; +- *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090; +- *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_result[1]) = 0xf6548a1747e59090; +- *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8145f50; +- __m128i_out = __lsx_vmin_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfrecip_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090; +- *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000047e59090; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffb8145f50; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xd3d3d3d3d3d3d3d3; +- *((unsigned long*)& __m256i_result[2]) = 0xd3d3d3d3d3d3d3d3; +- *((unsigned long*)& __m256i_result[1]) = 0xd3d3d3d3d3d3d3d3; +- *((unsigned long*)& __m256i_result[0]) = 0xd3d3d3d3d3d3d3d3; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0xd3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf6548a1747e59090; +- *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_op1[1]) = 0xf6548a1747e59090; +- *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmod_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000800000008; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000800000008; +- __m256i_out = __lasx_xvaddi_wu(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256d_op0[2]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256d_op0[1]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256d_op0[0]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op2[3]) = 0xd3d3d3d3d3d3d3d3; +- *((unsigned long*)& __m256d_op2[2]) = 0xd3d3d3d3d3d3d3d3; +- *((unsigned long*)& __m256d_op2[1]) = 0xd3d3d3d3d3d3d3d3; +- *((unsigned long*)& __m256d_op2[0]) = 0xd3d3d3d3d3d3d3d3; +- *((unsigned long*)& __m256d_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256d_result[0]) = 0xffffffffffffffff; +- __m256d_out = __lasx_xvfmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[0]) = 0xfffefffefffefffe; +- __m256i_out = __lasx_xvbitclri_h(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x17c64aaef639f093; +- *((unsigned long*)& __m128d_op0[0]) = 0xdb8f439722ec502d; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x17c64aaef639f093; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m128_op0[3]) = 0xf6548a17; +- *((int*)& __m128_op0[2]) = 0x47e59090; +- *((int*)& __m128_op0[1]) = 0x27b169bb; +- *((int*)& __m128_op0[0]) = 0xb8145f50; +- *((int*)& __m128_op1[3]) = 0x004eff62; +- *((int*)& __m128_op1[2]) = 0x00d2ff76; +- *((int*)& __m128_op1[1]) = 0xff700028; +- *((int*)& __m128_op1[0]) = 0x00be00a0; +- *((int*)& __m128_result[3]) = 0xb7032c34; +- *((int*)& __m128_result[2]) = 0x093d35ab; +- *((int*)& __m128_result[1]) = 0xe7a6533b; +- *((int*)& __m128_result[0]) = 0x800001b8; +- __m128_out = __lsx_vfmul_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xb7032c34093d35ab; +- *((unsigned long*)& __m128i_op0[0]) = 0xe7a6533b800001b8; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000900000009; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000900000009; +- __m128i_out = __lsx_vmini_wu(__m128i_op0,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100003ffe; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100003fcd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100003ffe; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100003fcd; +- __m256i_out = __lasx_xvhaddw_du_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000900000009; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000900000009; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000090; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000090; +- __m128i_out = __lsx_vsllwil_wu_hu(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000400000003ffb; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000400100004001; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000400000003ffb; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000400100004001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000400000003ffb; +- *((unsigned long*)& __m256i_result[2]) = 0x0000400100004001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000400000003ffb; +- *((unsigned long*)& __m256i_result[0]) = 0x0000400100004001; +- __m256i_out = __lasx_xvmin_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0xfffefffe; +- *((int*)& __m256_op0[6]) = 0xfffefffe; +- *((int*)& __m256_op0[5]) = 0xfffefffe; +- *((int*)& __m256_op0[4]) = 0xfffefffe; +- *((int*)& __m256_op0[3]) = 0xfffefffe; +- *((int*)& __m256_op0[2]) = 0xfffefffe; +- *((int*)& __m256_op0[1]) = 0xfffefffe; +- *((int*)& __m256_op0[0]) = 0xfffefffe; +- *((int*)& __m256_op1[7]) = 0x000023a3; +- *((int*)& __m256_op1[6]) = 0x00003fff; +- *((int*)& __m256_op1[5]) = 0x000023a3; +- *((int*)& __m256_op1[4]) = 0x00003fef; +- *((int*)& __m256_op1[3]) = 0x000023a3; +- *((int*)& __m256_op1[2]) = 0x00003fff; +- *((int*)& __m256_op1[1]) = 0x000023a3; +- *((int*)& __m256_op1[0]) = 0x00003fef; +- *((int*)& __m256_result[7]) = 0xfffefffe; +- *((int*)& __m256_result[6]) = 0xfffefffe; +- *((int*)& __m256_result[5]) = 0xfffefffe; +- *((int*)& __m256_result[4]) = 0xfffefffe; +- *((int*)& __m256_result[3]) = 0xfffefffe; +- *((int*)& __m256_result[2]) = 0xfffefffe; +- *((int*)& __m256_result[1]) = 0xfffefffe; +- *((int*)& __m256_result[0]) = 0xfffefffe; +- __m256_out = __lasx_xvfsub_s(__m256_op0,__m256_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x004eff6200d2ff76; +- *((unsigned long*)& __m128i_op1[0]) = 0xff70002800be00a0; +- *((unsigned long*)& __m128i_result[1]) = 0x004eff6200d2ff76; +- *((unsigned long*)& __m128i_result[0]) = 0xff70002800be00a0; +- __m128i_out = __lsx_vsadd_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fff00003fff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xfffebffffffebfff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffebffffffebfff; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000090; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000090; +- *((unsigned long*)& __m128d_op1[1]) = 0x004eff6200d2ff76; +- *((unsigned long*)& 
__m128d_op1[0]) = 0xff70002800be00a0; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0xff800000; +- __m128_out = __lsx_vfcvt_s_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000400000003ffb; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000400100004001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000400000003ffb; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000400100004001; +- *((unsigned long*)& __m256i_result[3]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_result[2]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_result[1]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_result[0]) = 0x00003ff000003ff0; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x17c64aaef639f093; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0xf6548a1747e59090; +- *((unsigned long*)& __m128i_op2[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_result[1]) = 0x10f881a20ffd02b0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000; +- __m128i_out = __lsx_vmadd_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000005ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000007ffffffce; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_result[3]) = 0x00003fea00013feb; +- *((unsigned long*)& __m256i_result[2]) = 0x00003fe900014022; +- *((unsigned long*)& __m256i_result[1]) = 0x00003fea00013feb; +- *((unsigned long*)& __m256i_result[0]) = 0x00003fe900014022; +- __m256i_out = __lasx_xvabsd_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000005858585a; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000005858585a; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000005858585a; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000005858585a; +- *((unsigned long*)& __m256i_op1[3]) = 0x000023a300003fff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000023a300003fef; +- *((unsigned long*)& __m256i_op1[1]) = 0x000023a300003fff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000023a300003fef; +- *((unsigned long*)& __m256i_result[3]) = 0x000011d1ac2c4c2d; +- *((unsigned long*)& __m256i_result[2]) = 0x000011d1ac2c4c25; +- *((unsigned long*)& __m256i_result[1]) = 0x000011d1ac2c4c2d; +- *((unsigned long*)& __m256i_result[0]) = 0x000011d1ac2c4c25; +- __m256i_out = 
__lasx_xvavgr_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128i_op0[0]) = 0x00012c8a0000a58a; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000ebd20000714f; +- *((unsigned long*)& __m128i_op1[0]) = 0x00012c8a0000a58a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffff0000e29e; +- *((unsigned long*)& __m128i_result[0]) = 0x000259140000ffff; +- __m128i_out = __lsx_vsadd_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000008e8c000; +- *((unsigned long*)& __m256d_op0[2]) = 0x000000000fffc000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000008e8c000; +- *((unsigned long*)& __m256d_op0[0]) = 0x000000000fffc000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_lu_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvpcnt_w(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op1[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256d_op1[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256d_op1[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256d_op1[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cult_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000400000004000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003feec0108022; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003fe9c015802c; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003feec0108022; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003fe9c015802c; +- *((unsigned long*)& __m256i_result[3]) = 0x00007f124010c022; +- *((unsigned long*)& __m256i_result[2]) = 0x00007f174015c02c; +- *((unsigned long*)& __m256i_result[1]) = 0x00007f124010c022; +- *((unsigned long*)& __m256i_result[0]) = 0x00007f174015c02c; +- __m256i_out = __lasx_xvadda_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& 
__m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x08e8c000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x0fffc000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x08e8c000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x0fffc000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00003fea; +- *((int*)& __m256_op0[6]) = 0x00013feb; +- *((int*)& __m256_op0[5]) = 0x00003fe9; +- *((int*)& __m256_op0[4]) = 0x00014022; +- *((int*)& __m256_op0[3]) = 0x00003fea; +- *((int*)& __m256_op0[2]) = 0x00013feb; +- *((int*)& __m256_op0[1]) = 0x00003fe9; +- *((int*)& __m256_op0[0]) = 0x00014022; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvfrint_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftinth_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrari_b(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out 
= __lasx_xvssrarn_b_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0xffffffff; +- *((int*)& __m256_op1[6]) = 0xffffffff; +- *((int*)& __m256_op1[5]) = 0xffffffff; +- *((int*)& __m256_op1[4]) = 0xffffffff; +- *((int*)& __m256_op1[3]) = 0xffffffff; +- *((int*)& __m256_op1[2]) = 0xffffffff; +- *((int*)& __m256_op1[1]) = 0xffffffff; +- *((int*)& __m256_op1[0]) = 0xffffffff; +- *((int*)& __m256_op2[7]) = 0x00000000; +- *((int*)& __m256_op2[6]) = 0x00000000; +- *((int*)& __m256_op2[5]) = 0x00000000; +- *((int*)& __m256_op2[4]) = 0x00000000; +- *((int*)& __m256_op2[3]) = 0x00000000; +- *((int*)& __m256_op2[2]) = 0x00000000; +- *((int*)& __m256_op2[1]) = 0x00000000; +- *((int*)& __m256_op2[0]) = 0x00000000; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffffff; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffffff; +- __m256_out = __lasx_xvfmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x02b010f881a281a2; +- *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; +- __m128i_out = __lsx_vmini_hu(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; +- *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_result[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_result[0]) = 0x0002000200020002; +- __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- 
*((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvslti_w(__m256i_op0,11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x02b010f881a281a2; +- *((unsigned long*)& __m128i_op0[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; +- *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8145f50; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x02b010f881a281a2; +- *((unsigned long*)& __m128i_result[0]) = 0x27b169bbb8140001; +- __m128i_out = __lsx_vfrstp_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrlrni_hu_w(__m256i_op0,__m256i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x02b010f881a281a2; +- *((unsigned long*)& __m128i_op1[0]) = 0x27b169bbb8140001; +- *((unsigned long*)& __m128i_result[1]) = 0x000010f8000081a2; +- *((unsigned long*)& __m128i_result[0]) = 0x000069bb00000001; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000010f8000081a2; +- *((unsigned long*)& __m128i_op0[0]) = 0x000069bb00000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001000010f8; +- __m128i_out = __lsx_vshuf4i_w(__m128i_op0,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x10f881a20ffd02b0; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x10f881a20ffd02b0; +- *((unsigned long*)& __m128d_result[0]) = 
0x00000000ff800000; +- __m128d_out = __lsx_vfmax_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftint_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x7ff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x7ff0000000000000; +- __m128d_out = __lsx_vfrsqrt_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x10f881a20ffd02b0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0xf1f181a2f1f1f1b0; +- *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1f180f1f1; +- __m128i_out = __lsx_vmini_b(__m128i_op0,-15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000001fffc8027; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000001fffc7ff1; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000001fffc8027; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000001fffc7ff1; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000100000014; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000014; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000100000014; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000014; +- __m256i_out = __lasx_xvmini_wu(__m256i_op0,0x14); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x10f881a20ffd02b0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0xfff8ffa2fffdffb0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ff800000; +- __m128i_out = __lsx_vaddwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[3]) = 0x1d1d1d1e1d1d1d1e; +- *((unsigned long*)& __m256i_result[2]) = 0x1d1d1d1e1d1d1d1e; +- *((unsigned long*)& __m256i_result[1]) = 0x1d1d1d1e1d1d1d1e; +- *((unsigned long*)& __m256i_result[0]) = 0x1d1d1d1e1d1d1d1e; +- __m256i_out = __lasx_xvaddi_bu(__m256i_op0,0x1d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x10f8000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; +- *((unsigned long*)& __m128i_result[1]) = 
0x10f8000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x00000001000010f8; +- __m128i_out = __lsx_vsadd_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001000010f8; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff8ffa2fffdffb0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0f0f0f0f00000f00; +- __m128i_out = __lsx_vssrlni_bu_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_wu_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xf1f181a2; +- *((int*)& __m128_op0[2]) = 0xf1f1f1b0; +- *((int*)& __m128_op0[1]) = 0xf1f1f1f1; +- *((int*)& __m128_op0[0]) = 0xf180f1f1; +- *((int*)& __m128_result[3]) = 0x7fc00000; +- *((int*)& __m128_result[2]) = 0x7fc00000; +- *((int*)& __m128_result[1]) = 0x7fc00000; +- *((int*)& __m128_result[0]) = 0x7fc00000; +- __m128_out = __lsx_vflogb_s(__m128_op0); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_result[3]) = 0x00007fde00007fd4; +- *((unsigned long*)& __m256i_result[2]) = 0x00007fe000007fe0; +- *((unsigned long*)& __m256i_result[1]) = 0x00007fde00007fd4; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fe000007fe0; +- __m256i_out = __lasx_xvaddwev_w_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00007fde00007fd4; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007fe000007fe0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00007fde00007fd4; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007fe000007fe0; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffff7eddffff7ed3; +- *((unsigned long*)& __m256i_result[2]) = 0xffff7edfffff7edf; +- *((unsigned long*)& __m256i_result[1]) = 0xffff7eddffff7ed3; +- *((unsigned long*)& __m256i_result[0]) = 0xffff7edfffff7edf; +- __m256i_out = __lasx_xvsadd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, 
__m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslti_h(__m128i_op0,15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff7eddffff7ed3; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff7edfffff7edf; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff7eddffff7ed3; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff7edfffff7edf; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff00007edd; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff00007ed3; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff00007edf; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff00007edf; +- __m256i_out = __lasx_vext2xv_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x10f8000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrlrni_h_w(__m128i_op0,__m128i_op1,0x1e); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xfff8ffa2fffdffb0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_result[1]) = 0x0108015e01030150; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000017f0000; +- __m128i_out = __lsx_vssub_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x10f8000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xfff8ffa2fffdffb0; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128d_result[1]) = 0x10f8000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x00000000ff800000; +- __m128d_out = __lsx_vfmaxa_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvsle_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff8ffa2fffdffb0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_du_q(__m128i_op0,__m128i_op1,0x50); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00007fde00007fd4; +- *((unsigned long*)& __m256i_op0[2]) = 0x00007fe000007fe0; +- *((unsigned long*)& __m256i_op0[1]) = 0x00007fde00007fd4; +- *((unsigned long*)& __m256i_op0[0]) = 0x00007fe000007fe0; +- *((unsigned long*)& __m256i_result[3]) = 0x000081220000812c; +- *((unsigned long*)& __m256i_result[2]) = 0x0000812000008120; +- *((unsigned long*)& __m256i_result[1]) = 0x000081220000812c; +- *((unsigned long*)& __m256i_result[0]) = 0x0000812000008120; +- __m256i_out = __lasx_xvneg_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff7eddffff7ed3; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff7edfffff7edf; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff7eddffff7ed3; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff7edfffff7edf; +- *((unsigned long*)& __m256i_op1[3]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op1[2]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_op1[1]) = 0x00003fef00003fea; +- *((unsigned long*)& __m256i_op1[0]) = 0x00003ff000003ff0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffff3eedffff3ee3; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff3eedffff3ee3; +- __m256i_out = __lasx_xvhsubw_qu_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x10f8000100000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000001000010f8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x087c000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000087c; +- __m128i_out = __lsx_vavg_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00003fea00013fec; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fe50001c013; +- *((unsigned long*)& __m256i_op0[1]) = 0x00003fea00013fec; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fe50001c013; +- *((unsigned long*)& __m256i_result[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[2]) = 0x000000ff0000ff00; +- *((unsigned long*)& __m256i_result[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_result[0]) = 0x000000ff0000ff00; +- __m256i_out = __lasx_xvsat_b(__m256i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x10f881a20ffd02b0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ff800000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0ff780a10efc01af; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fe7f0000; +- __m128i_out = __lsx_vmuh_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x0ff780a1; +- *((int*)& __m128_op0[2]) = 0x0efc01af; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0xfe7f0000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000fe7f0000; +- __m128i_out = __lsx_vfrintrne_s(__m128_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x087c000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000087c; +- *((unsigned long*)& __m128i_result[1]) = 0xf784000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffff784; +- __m128i_out = __lsx_vneg_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_wu(__m128i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; +- __m256i_out = __lasx_xvssrlrni_b_h(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x087c000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000000087c; +- *((unsigned long*)& __m128i_op1[1]) = 0x10f8000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffff784; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000081220000812c; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000812000008120; +- *((unsigned long*)& __m256i_op0[1]) = 0x000081220000812c; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000812000008120; +- *((unsigned long*)& __m256i_result[3]) = 0xe9e968c9e9e968c1; +- *((unsigned long*)& __m256i_result[2]) = 0xe9e968c9e9e968c9; +- *((unsigned long*)& __m256i_result[1]) = 0xe9e968c9e9e968c1; +- *((unsigned long*)& __m256i_result[0]) = 0xe9e968c9e9e968c9; +- __m256i_out = __lasx_xvnori_b(__m256i_op0,0x16); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000081220000812c; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000812000008120; +- *((unsigned long*)& __m256i_op0[1]) = 0x000081220000812c; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000812000008120; +- *((unsigned long*)& __m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; +- *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffefffff784; +- *((unsigned long*)& __m128i_op1[1]) = 0x10f8000100000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000001000010f8; +- *((unsigned long*)& __m128i_result[1]) = 0x0177fff0fffffff0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000011ff8bc; +- __m128i_out = __lsx_vmod_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256d_op0[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256d_op0[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256d_op0[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256d_op1[3]) = 0x00003fea00013fec; +- *((unsigned long*)& __m256d_op1[2]) = 0x00003fe50001c013; +- *((unsigned long*)& __m256d_op1[1]) = 0x00003fea00013fec; +- *((unsigned long*)& __m256d_op1[0]) = 0x00003fe50001c013; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000180000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000180000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000100000001; +- __m256i_out = __lasx_xvftintrp_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe013fcf2e015fc38; +- *((unsigned long*)& __m256i_op0[2]) = 0xe013fd00dff78420; +- *((unsigned long*)& __m256i_op0[1]) = 0xe013fcf2e015fc38; +- *((unsigned long*)& __m256i_op0[0]) = 0xe013fd00dff78420; +- *((unsigned long*)& __m256i_op1[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op1[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op1[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssran_bu_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00003fea0014734d; +- *((unsigned long*)& __m256i_op0[2]) = 0x00003fe900140d85; +- *((unsigned long*)& __m256i_op0[1]) = 0x00003fea0014734d; +- *((unsigned long*)& __m256i_op0[0]) = 0x00003fe900140d85; +- *((unsigned long*)& __m256i_op1[3]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[2]) = 0x000000ff0000ff00; +- *((unsigned long*)& __m256i_op1[1]) = 0x000000ff000000ff; +- *((unsigned long*)& __m256i_op1[0]) = 0x000000ff0000ff00; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrarni_du_q(__m256i_op0,__m256i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0177fff0fffffff0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff8bc; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffefffff784; +- *((unsigned long*)& __m128i_result[1]) = 0x00bbfff7fffffff7; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff008ff820; +- __m128i_out = __lsx_vavg_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_result[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000050005; +- *((unsigned long*)& __m256i_result[0]) = 0xfefefefefefefefe; +- __m256i_out = __lasx_xvinsgr2vr_w(__m256i_op0,int_op1,0x3); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xfffffffe; +- *((int*)& __m128_op0[0]) = 0xfffff784; +- *((int*)& __m128_op1[3]) = 0x0177fff0; +- *((int*)& __m128_op1[2]) = 0xfffffff0; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x011ff8bc; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sun_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfd02fd02; +- *((int*)& __m256_op0[6]) = 0xfd02fd02; +- *((int*)& __m256_op0[5]) = 0xfd02fd02; +- *((int*)& __m256_op0[4]) = 0xfd02fd02; +- *((int*)& __m256_op0[3]) = 0xfd02fd02; +- *((int*)& __m256_op0[2]) = 0xfd02fd02; +- *((int*)& __m256_op0[1]) = 0xfd02fd02; +- *((int*)& __m256_op0[0]) = 0xfd02fd02; +- *((int*)& __m256_result[7]) = 0x81fa28e4; +- *((int*)& __m256_result[6]) = 0x81fa28e4; +- *((int*)& __m256_result[5]) = 0x81fa28e4; +- *((int*)& __m256_result[4]) = 0x81fa28e4; +- *((int*)& __m256_result[3]) = 0x81fa28e4; +- *((int*)& __m256_result[2]) = 0x81fa28e4; +- *((int*)& __m256_result[1]) = 0x81fa28e4; +- *((int*)& __m256_result[0]) = 0x81fa28e4; +- __m256_out = __lasx_xvfrecip_s(__m256_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmin_w(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820; +- *((unsigned long*)& __m128i_op1[1]) = 0x00bbfff7fffffff7; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffff008ff820; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000011ff040; +- __m128i_out = __lsx_vaddwev_d_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0005000500050005; +- *((unsigned long*)& __m256i_op0[2]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000050005; +- *((unsigned long*)& __m256i_op0[0]) = 0xfefefefefefefefe; +- *((unsigned long*)& __m256i_op1[3]) = 0xf007fe76f008fe19; +- *((unsigned long*)& __m256i_op1[2]) = 0xf08aff01f07cc291; +- *((unsigned long*)& __m256i_op1[1]) = 0xf007fe76f008fe19; +- *((unsigned long*)& __m256i_op1[0]) = 0xf08aff01f07cc291; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000001400; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000003c01ff9; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000003c01ff9; +- __m256i_out = __lasx_xvsrlni_d_q(__m256i_op0,__m256i_op1,0x66); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x0177fff0fffffff0; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000011ff8bc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0177fff0fffffff0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff8bc; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsadd_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000011f0000f040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0177fff0fffffff0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff8bc; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffffff; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000001400; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000003c01ff9; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000003c01ff9; +- *((unsigned long*)& __m256i_op1[3]) = 0xfffffffff08a7de0; +- *((unsigned long*)& __m256i_op1[2]) = 0xfffffffff07c4170; +- *((unsigned long*)& __m256i_op1[1]) = 0xfffffffff08a7de0; +- *((unsigned long*)& __m256i_op1[0]) = 0xfffffffff07c4170; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffff08a7de0; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffff07c4170; +- *((unsigned long*)& __m256i_result[1]) = 
0xfffffffff08a7de0; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffff07c4170; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_vext2xv_d_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000001400; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000003c01ff9; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000003c01ff9; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffec00; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffc3fe007; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffc3fe007; +- __m256i_out = __lasx_xvsubwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff040; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe0000ff18; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; +- __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0177fff0fffffff0; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff8bc; +- *((unsigned long*)& __m128i_op2[1]) = 0x00bbfff7fffffff7; +- *((unsigned long*)& __m128i_op2[0]) = 0xffffffff008ff820; +- *((unsigned long*)& __m128i_result[1]) = 0xffe8008fffe7008f; +- *((unsigned long*)& __m128i_result[0]) = 0x00010001f1153780; +- __m128i_out = __lsx_vmaddwev_w_hu(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m128d_op0[0]) = 0x00000000011ff040; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvslti_h(__m256i_op0,-11); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffee; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010012; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffe1ffc0; +- __m128i_out = __lsx_vsubwev_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe1ffc0; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffff009ff83f; +- __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000011ff040; +- __m128i_out = __lsx_vhaddw_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x81fa28e4; +- *((int*)& __m256_op0[6]) = 0x81fa28e4; +- *((int*)& __m256_op0[5]) = 0x81fa28e4; +- *((int*)& __m256_op0[4]) = 0x81fa28e4; +- *((int*)& __m256_op0[3]) = 0x81fa28e4; +- *((int*)& __m256_op0[2]) = 0x81fa28e4; +- *((int*)& __m256_op0[1]) = 0x81fa28e4; +- *((int*)& __m256_op0[0]) = 0x81fa28e4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrz_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x0); +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000001400; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000003c01ff9; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000003c01ff9; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000001400; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000003c01ff9; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000003c01ff9; +- __m256i_out = __lasx_xvsadd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_w_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010012; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffe1ffc0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe1ffc0; +- *((unsigned long*)& __m128i_result[1]) = 0x0001000100010012; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000ffe1ffc0; +- __m128i_out = __lsx_vsigncov_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vsle_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op0[2]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op0[1]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op0[0]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_result[2]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_result[1]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_result[0]) = 0xfd02fd02fd02fd02; +- __m256i_out = __lasx_xvsrl_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffe4ffe4ffe4ffe4; +- 
*((unsigned long*)& __m128i_result[0]) = 0xffe4ffe4ffe4ffe4; +- __m128i_out = __lsx_vsubi_hu(__m128i_op0,0x1b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffe4ffe4ffe4ffe4; +- *((unsigned long*)& __m128i_op0[0]) = 0xffe4ffe4ffe4ffe4; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000011ff040; +- *((unsigned long*)& __m128i_result[1]) = 0xff00e400ff00e400; +- *((unsigned long*)& __m128i_result[0]) = 0xff01e41ffff0e440; +- __m128i_out = __lsx_vilvl_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsat_du(__m256i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op0[2]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op0[1]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op0[0]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_result[3]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_result[2]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_result[1]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_result[0]) = 0xfd12fd12fd12fd12; +- __m256i_out = __lasx_xvbitseti_b(__m256i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op0[2]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op0[1]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op0[0]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op1[3]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op1[2]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op1[1]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op1[0]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0xfa15fa15fa15fa14; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0xfa15fa15fa15fa14; +- __m256i_out = __lasx_xvhaddw_qu_du(__m256i_op0,__m256i_op1); +- 
ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfa15fa15fa15fa14; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfa15fa15fa15fa14; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[0]) = 0x05ea05ea05ea05ec; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0xffffffffffffffff; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x2); +- *((int*)& __m128_op0[3]) = 0xffffffff; +- *((int*)& __m128_op0[2]) = 0xffffffff; +- *((int*)& __m128_op0[1]) = 0xffffffff; +- *((int*)& __m128_op0[0]) = 0xffffffff; +- *((int*)& __m128_op1[3]) = 0xffffffff; +- *((int*)& __m128_op1[2]) = 0xffffffff; +- *((int*)& __m128_op1[1]) = 0xffffffff; +- *((int*)& __m128_op1[0]) = 0xffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_sune_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0177fff0fffffff0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000011ff8bc; +- *((unsigned long*)& __m128i_result[1]) = 0x05dfffc3ffffffc0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000047fe2f0; +- __m128i_out = __lsx_vslli_d(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x05dfffc3ffffffc0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000047fe2f0; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000047fe2f0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000047fe2f0; +- __m128i_out = __lsx_vreplve_d(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvreplve0_b(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvfrintrne_d(__m256d_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010012; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fec20704; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000012; +- __m128i_out = __lsx_vexth_wu_hu(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xdfdfdfdfdfdfdfdf; +- *((unsigned long*)& __m128i_result[0]) = 0xdfdfdfdfdfdfdfdf; +- __m128i_out = __lsx_vbitclri_b(__m128i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000047fe2f0; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000047fe2f0; +- *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000fec20704; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000043fe2fc; +- *((unsigned long*)& __m128i_result[0]) = 0x00000000001fffff; +- __m128i_out = __lsx_vsra_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe011df03e; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xf03ef03ef03ef03e; +- *((unsigned long*)& __m128i_result[0]) = 0xf03ef03ef03ef03e; +- __m128i_out = __lsx_vreplve_h(__m128i_op0,int_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x00000000047fe2f0; +- *((unsigned long*)& __m128d_op1[0]) = 0x00000000047fe2f0; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_w_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf03ef03ef03ef03e; +- *((unsigned long*)& __m128i_op0[0]) = 0xf03ef03ef03ef03e; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vslei_d(__m128i_op0,-9); +- ASSERTEQ_64(__LINE__, __m128i_result, 
__m128i_out); +- +- *((int*)& __m256_op0[7]) = 0xfd12fd12; +- *((int*)& __m256_op0[6]) = 0xfd12fd12; +- *((int*)& __m256_op0[5]) = 0xfd12fd12; +- *((int*)& __m256_op0[4]) = 0xfd12fd12; +- *((int*)& __m256_op0[3]) = 0xfd12fd12; +- *((int*)& __m256_op0[2]) = 0xfd12fd12; +- *((int*)& __m256_op0[1]) = 0xfd12fd12; +- *((int*)& __m256_op0[0]) = 0xfd12fd12; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000080000000; +- __m256i_out = __lasx_xvftintrne_w_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_result[2]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_result[1]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000001; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0x49); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010012; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fec20704; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vclo_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op0[2]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op0[1]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op0[0]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_result[3]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256i_result[2]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256i_result[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256i_result[0]) = 0x000a000a000a000a; +- __m256i_out = __lasx_xvmaxi_h(__m256i_op0,10); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0xfa15fa15fa15fa14; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0xfa15fa15fa15fa14; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffefffffffe; +- __m128i_out = 
__lsx_vaddwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01e41ffff0e440; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffe4ffffffe4ff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffe4fffff0e4ff; +- __m128i_out = __lsx_vmin_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_h(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256d_op1[2]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256d_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256d_op1[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrne_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128d_op0[0]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128d_op1[1]) = 0xfffefffefffefffe; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffefffe011df03e; +- *((unsigned long*)& __m128d_result[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128d_result[0]) = 0xfffffffefffffffe; +- __m128d_out = __lsx_vfdiv_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01e41ffff0e440; +- *((unsigned long*)& __m128i_op1[1]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffefffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xff01e420fff0e442; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fffffff80000000; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); 
+- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[2]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[0]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_op1[3]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op1[2]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op1[1]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_op1[0]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x04f104f104f104f1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x04f104f104f104f1; +- __m256i_out = __lasx_xvmulwod_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x8000000080000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvpickev_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000080000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000808ff821; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vaddwod_d_wu_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256d_op1[2]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256d_op1[1]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256d_op1[0]) = 0xfd02fd02fd02fd02; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op1[2]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_op1[1]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& 
__m256i_op1[0]) = 0xfd12fd12fd12fd12; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmulwev_d_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vreplvei_w(__m128i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x7fffffff; +- *((int*)& __m256_op0[6]) = 0x80000000; +- *((int*)& __m256_op0[5]) = 0x7fffffff; +- *((int*)& __m256_op0[4]) = 0x80000000; +- *((int*)& __m256_op0[3]) = 0x7fffffff; +- *((int*)& __m256_op0[2]) = 0x80000000; +- *((int*)& __m256_op0[1]) = 0x7fffffff; +- *((int*)& __m256_op0[0]) = 0x80000000; +- *((int*)& __m256_op1[7]) = 0xfd02fd02; +- *((int*)& __m256_op1[6]) = 0xfd02fd02; +- *((int*)& __m256_op1[5]) = 0xfd02fd02; +- *((int*)& __m256_op1[4]) = 0xfd02fd02; +- *((int*)& __m256_op1[3]) = 0xfd02fd02; +- *((int*)& __m256_op1[2]) = 0xfd02fd02; +- *((int*)& __m256_op1[1]) = 0xfd02fd02; +- *((int*)& __m256_op1[0]) = 0xfd02fd02; +- *((int*)& __m256_op2[7]) = 0xfd02fd02; +- *((int*)& __m256_op2[6]) = 0xfd02fd02; +- *((int*)& __m256_op2[5]) = 0xfd02fd02; +- *((int*)& __m256_op2[4]) = 0xfd02fd02; +- *((int*)& __m256_op2[3]) = 0xfd02fd02; +- *((int*)& __m256_op2[2]) = 0xfd02fd02; +- *((int*)& __m256_op2[1]) = 0xfd02fd02; +- *((int*)& __m256_op2[0]) = 0xfd02fd02; +- *((int*)& __m256_result[7]) = 0x7fffffff; +- *((int*)& __m256_result[6]) = 0x7d02fd02; +- *((int*)& __m256_result[5]) = 0x7fffffff; +- *((int*)& __m256_result[4]) = 0x7d02fd02; +- *((int*)& __m256_result[3]) = 0x7fffffff; +- *((int*)& __m256_result[2]) = 0x7d02fd02; +- *((int*)& __m256_result[1]) = 0x7fffffff; +- *((int*)& __m256_result[0]) = 0x7d02fd02; +- __m256_out = __lasx_xvfnmadd_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0c0c0c0c0c0c0c0c; +- *((unsigned long*)& __m256i_result[2]) = 0x0c0c0c0c0c0c0c0c; +- *((unsigned long*)& __m256i_result[1]) = 0x0c0c0c0c0c0c0c0c; +- *((unsigned long*)& __m256i_result[0]) = 0x0c0c0c0c0c0c0c0c; +- __m256i_out = __lasx_xvmaxi_b(__m256i_op0,12); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xffffff7fffffff7f; +- *((unsigned long*)& __m128i_result[0]) = 0xffffff7fffffff7f; +- __m128i_out = __lsx_vbitrevi_w(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- int_op0 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplgr2vr_b(int_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_sune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff01fe03ff01fe03; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01fe03ff01fe03; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xff01fe03ff01fe03; +- __m128i_out = __lsx_vsrarn_w_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff4; +- __m128i_out = __lsx_vmini_d(__m128i_op0,-12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclr_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[2]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op1[0]) = 0x05ea05ea05ea05ec; +- *((unsigned long*)& __m256i_result[3]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xfa15fa15fa15fa14; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xfa15fa15fa15fa14; +- __m256i_out = __lasx_xvsub_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xff01fe03ff01fe03; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vseq_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffff0000; +- __m256i_out = __lasx_xvsrlrni_b_h(__m256i_op0,__m256i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256i_op1[2]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256i_op1[1]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256i_op1[0]) = 0x000a000a000a000a; +- *((unsigned long*)& __m256i_result[3]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_result[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_result[1]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_result[0]) = 0x0004000500040005; +- __m256i_out = __lasx_xvavg_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op0[0]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0001000300000004; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000300000004; +- *((unsigned long*)& __m256i_result[1]) = 0x0001000300000004; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000300000004; +- __m256i_out = __lasx_xvhaddw_wu_hu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& 
__m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0000ffff; +- __m256i_out = __lasx_xvexth_wu_hu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m128d_op1[0]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_ceq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xfa15fa15fa15fa14; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xfa15fa15fa15fa14; +- *((unsigned long*)& __m256i_result[3]) = 0x8282828282828282; +- *((unsigned long*)& __m256i_result[2]) = 0x8768876887688769; +- *((unsigned long*)& __m256i_result[1]) = 0x8282828282828282; +- *((unsigned long*)& __m256i_result[0]) = 0x8768876887688769; +- __m256i_out = __lasx_xvxori_b(__m256i_op0,0x7d); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffff4; +- *((unsigned long*)& __m128i_result[1]) = 0x000000200000001c; +- *((unsigned long*)& __m128i_result[0]) = 0x000000200000001c; +- __m128i_out = __lsx_vclo_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000200000001c; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000200000001c; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000200000001c; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000200000001c; +- *((unsigned long*)& __m128i_result[1]) = 0x00000020000000c0; +- *((unsigned long*)& __m128i_result[0]) = 0x00000020000000c0; +- __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01e41ffff0ffff; +- *((unsigned long*)& __m128i_result[1]) = 0xff00e400ff00e400; +- *((unsigned long*)& __m128i_result[0]) = 0xff01e41ffff0ffff; +- __m128i_out = __lsx_vmini_d(__m128i_op0,14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op0[0]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op1[3]) = 0x8282828282828282; +- *((unsigned long*)& __m256i_op1[2]) = 0x8768876887688769; +- *((unsigned long*)& __m256i_op1[1]) = 0x8282828282828282; +- *((unsigned long*)& __m256i_op1[0]) = 0x8768876887688769; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000104000200; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000104000200; +- __m256i_out = 
__lasx_xvsrarn_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_result[2]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_result[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_result[0]) = 0x00007fff00007fff; +- __m256i_out = __lasx_xvpackod_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000104000200; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000104000200; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op2[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op2[1]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op2[0]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_result[3]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_result[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_result[1]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_result[0]) = 0x0004000500040005; +- __m256i_out = __lasx_xvshuf_w(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0xff00e400ff00e400; +- *((unsigned long*)& __m128d_op0[0]) = 0xff01e41ffff0ffff; +- *((unsigned long*)& __m128d_op1[1]) = 0x5555000054100000; +- *((unsigned long*)& __m128d_op1[0]) = 0x5555000154100155; +- *((unsigned long*)& __m128d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0xfff0000000000000; +- __m128d_out = __lsx_vfmadd_d(__m128d_op0,__m128d_op1,__m128d_op2); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrari_h(__m128i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- 
+- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vslt_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0x0001000000000000; +- __m128i_out = __lsx_vsubwev_q_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op0[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op0[1]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op0[0]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op1[3]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op1[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op1[0]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvssrani_du_q(__m256i_op0,__m256i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fffffff80000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsran_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 
0xffffffffffffffff; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x9f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssub_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x8282828282828282; +- *((unsigned long*)& __m256i_op1[2]) = 0x8768876887688769; +- *((unsigned long*)& __m256i_op1[1]) = 0x8282828282828282; +- *((unsigned long*)& __m256i_op1[0]) = 0x8768876887688769; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x00000000003fffc0; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x00000000003fffc0; +- __m256i_out = __lasx_xvssran_wu_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8282828282828282; +- *((unsigned long*)& __m256i_op0[2]) = 0x8768876887688769; +- *((unsigned long*)& __m256i_op0[1]) = 0x8282828282828282; +- *((unsigned long*)& __m256i_op0[0]) = 0x8768876887688769; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x00000000003fffc0; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x00000000003fffc0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffc00040; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffc00040; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op1[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op1[1]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_op1[0]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_op2[3]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_op2[2]) = 0x00007fff00000000; +- *((unsigned long*)& __m256i_op2[1]) = 0x00007fff00007fff; +- *((unsigned long*)& __m256i_op2[0]) = 0x00007fff00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff10; +- __m256i_out = __lasx_xvfrstp_b(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x5555000054100000; +- *((unsigned long*)& __m128i_op1[0]) = 0x5555000154100155; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000155; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400; +- *((unsigned long*)& __m128i_op0[0]) = 0xff01e41ffff0ffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xff01ffffe41f0000; +- *((unsigned long*)& __m128i_result[0]) = 0xfff00000ffff0000; +- __m128i_out = __lsx_vilvl_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff80007fff0000; +- __m256i_out = __lasx_xvor_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xfff0008000000080; +- *((unsigned long*)& __m128i_result[0]) = 0xfff0008000000080; +- __m128i_out = __lsx_vbitseti_w(__m128i_op0,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vrotri_h(__m128i_op0,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vseq_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000003fffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000003fffc0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 
0xffff0000ffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000ffffffff; +- __m256i_out = __lasx_xvslt_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000155; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; +- __m128i_out = __lsx_vaddwod_q_du_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00040004; +- *((int*)& __m256_op0[6]) = 0x00040004; +- *((int*)& __m256_op0[5]) = 0x00040005; +- *((int*)& __m256_op0[4]) = 0x00040005; +- *((int*)& __m256_op0[3]) = 0x00040004; +- *((int*)& __m256_op0[2]) = 0x00040004; +- *((int*)& __m256_op0[1]) = 0x00040005; +- *((int*)& __m256_op0[0]) = 0x00040005; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrzh_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff01ffffe41f0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfff00000ffff0000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000100000155; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x000000000000002b; +- __m128i_out = __lsx_vssrarni_bu_h(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff00e400ff00e400; +- *((unsigned long*)& __m128i_op0[0]) = 0xfee1f6f18800ff7f; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarni_hu_w(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffe4ffffffe4; +- __m256i_out = __lasx_xvsubi_wu(__m256i_op0,0x1c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x7fff80007fff0000; 
+- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssrani_bu_h(__m256i_op0,__m256i_op1,0xf); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000155; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000100000155; +- __m128i_out = __lsx_vmax_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvexth_hu_bu(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffc00040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffc00040; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x1080108010060002; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x1080108010060002; +- __m256i_out = __lasx_xvsrlr_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256i_op1[2]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256i_op1[1]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256i_op1[0]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000001d0000001c; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001d0000001c; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001d0000001c; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001d0000001c; +- __m256i_out = __lasx_xvsubwev_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x1080108010060002; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x1080108010060002; +- *((unsigned long*)& __m256d_op1[3]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256d_op1[2]) = 
0xffffffe4ffffffe4; +- *((unsigned long*)& __m256d_op1[1]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256d_op1[0]) = 0xffffffe4ffffffe4; +- *((unsigned long*)& __m256d_op2[3]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256d_op2[2]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256d_op2[1]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256d_op2[0]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256d_result[3]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256d_result[2]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256d_result[1]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256d_result[0]) = 0x7fff00017fff0000; +- __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrani_bu_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x545501550001113a; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xd45501550001113a; +- __m128i_out = __lsx_vbitrev_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[0]) = 0x0010001000100010; +- __m128i_out = __lsx_vclz_h(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000155; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xffff100000000000; +- __m128i_out = __lsx_vfrstp_b(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfefe000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000155; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000100000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_h_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff100000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_result[0]) = 0x000f000000000000; +- __m128i_out = __lsx_vmod_hu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff00017fff0000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_result[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_result[0]) = 0x04f104f104f504ed; +- __m256i_out = __lasx_xvshuf4i_d(__m256i_op0,__m256i_op1,0x7e); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op0[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op0[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0002ffff00020002; +- *((unsigned long*)& __m256i_result[2]) = 0x04f504f104f504f5; +- *((unsigned long*)& __m256i_result[1]) = 0x0002ffff00020002; +- *((unsigned long*)& __m256i_result[0]) = 0x04f504f104f504f5; +- __m256i_out = __lasx_xvshuf4i_h(__m256i_op0,0x65); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((int*)& __m256_op1[7]) = 0x00000000; +- *((int*)& __m256_op1[6]) = 0x00000000; +- *((int*)& __m256_op1[5]) = 0x7fff8000; +- *((int*)& __m256_op1[4]) = 0x7fff0000; +- *((int*)& __m256_op1[3]) = 0x00000000; +- *((int*)& __m256_op1[2]) = 0x00000000; +- *((int*)& __m256_op1[1]) = 0x7fff8000; +- *((int*)& __m256_op1[0]) = 0x7fff0000; +- *((int*)& __m256_op2[7]) = 0xffffffff; +- *((int*)& __m256_op2[6]) = 0xffffffff; +- *((int*)& __m256_op2[5]) = 0xffffffff; +- *((int*)& __m256_op2[4]) = 0xffffff10; +- *((int*)& __m256_op2[3]) = 0xffffffff; +- *((int*)& __m256_op2[2]) = 0xffffffff; +- *((int*)& __m256_op2[1]) = 0xffffffff; +- *((int*)& __m256_op2[0]) = 0xffffff10; +- *((int*)& __m256_result[7]) = 0xffffffff; +- *((int*)& __m256_result[6]) = 0xffffffff; +- *((int*)& __m256_result[5]) = 0xffffffff; +- *((int*)& __m256_result[4]) = 0xffffff10; +- *((int*)& __m256_result[3]) = 0xffffffff; +- *((int*)& __m256_result[2]) = 0xffffffff; +- *((int*)& __m256_result[1]) = 0xffffffff; +- *((int*)& __m256_result[0]) = 0xffffff10; +- __m256_out = __lasx_xvfnmsub_s(__m256_op0,__m256_op1,__m256_op2); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = 
__lsx_vrotr_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000155; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000f0000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffff10000; +- __m128i_out = __lsx_vhsubw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff80007fff0000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_result[2]) = 0x7fff81007fff0100; +- *((unsigned long*)& __m256i_result[1]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_result[0]) = 0x7fff81007fff0100; +- __m256i_out = __lasx_xvbitrevi_w(__m256i_op0,0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff81007fff0100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000010000000100; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff81007fff0100; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000008000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0003fffc0803fff8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000008000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0003fffc0803fff8; +- __m256i_out = __lasx_xvsrari_d(__m256i_op0,0xd); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000008000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0003fffc0803fff8; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000008000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0003fffc0803fff8; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000fffc0000fff8; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000fffc0000fff8; +- __m256i_out = __lasx_xvpackev_h(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_h(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff100000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000000f0000; +- *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwod_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpickod_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[2]) = 0x0004000400040004; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_result[0]) = 0x0004000400040004; +- __m256i_out = __lasx_xvaddwod_h_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x00000000; +- *((int*)& __m256_result[4]) = 0x00000000; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x00000000; +- *((int*)& __m256_result[0]) = 0x00000000; +- __m256_out = __lasx_xvffint_s_wu(__m256i_op0); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffff10000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_d(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0002ffff00020002; +- *((unsigned long*)& __m256i_op0[2]) = 0x04f504f104f504f5; +- *((unsigned long*)& __m256i_op0[1]) = 0x0002ffff00020002; +- *((unsigned long*)& __m256i_op0[0]) = 0x04f504f104f504f5; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x000200ff00020002; +- *((unsigned long*)& __m256i_result[2]) = 0x00f500f100f500f5; +- *((unsigned long*)& 
__m256i_result[1]) = 0x000200ff00020002; +- *((unsigned long*)& __m256i_result[0]) = 0x00f500f100f500f5; +- __m256i_out = __lasx_xvaddwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256d_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256d_op0[0]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256d_op1[3]) = 0x0004000400040004; +- *((unsigned long*)& __m256d_op1[2]) = 0x0004000500040005; +- *((unsigned long*)& __m256d_op1[1]) = 0x0004000400040004; +- *((unsigned long*)& __m256d_op1[0]) = 0x0004000500040005; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfrintrne_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vextrins_d(__m128i_op0,__m128i_op1,0x8a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0000ffff0002fffc; +- *((unsigned long*)& __m256i_result[2]) = 0xffff0000fffd0003; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0002fffc; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffd0003; +- __m256i_out = __lasx_xvmulwod_q_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsll_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvabsd_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsran_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmax_du(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_seq_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vpackev_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0002fffc; +- *((unsigned long*)& __m256i_op0[2]) = 0xffff0000fffd0003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffc; +- *((unsigned long*)& __m256i_op0[0]) = 
0xffff0000fffd0003; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0001fffe0005fff9; +- *((unsigned long*)& __m256i_result[2]) = 0x04f004f204f204f0; +- *((unsigned long*)& __m256i_result[1]) = 0x0001fffe0005fff9; +- *((unsigned long*)& __m256i_result[0]) = 0x04f004f204f204f0; +- __m256i_out = __lasx_xvsadd_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmulwev_h_bu_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0005fff9; +- *((unsigned long*)& __m256i_op0[2]) = 0x04f004f204f204f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0005fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0x04f004f204f204f0; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000900000009; +- __m256i_out = __lasx_xvsrai_w(__m256i_op0,0x17); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001fffe0005fff9; +- *((unsigned long*)& __m256i_op0[2]) = 0x04f004f204f204f0; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001fffe0005fff9; +- *((unsigned long*)& __m256i_op0[0]) = 0x04f004f204f204f0; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000002780; +- __m256i_out = __lasx_xvsrl_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrne_l_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_w(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_h_bu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffd880; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffd880; +- __m256i_out = __lasx_xvneg_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vexth_qu_du(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0001000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x03fc03fc03fc03fc; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vftintrz_lu_d(__m128d_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffd880; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffd880; +- int_result = 0x0000000000000000; +- int_out = __lasx_xvpickve2gr_w(__m256i_op0,0x2); +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vneg_w(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_op0[2]) = 0x03acfc5303260e80; +- *((unsigned long*)& __m256i_op0[1]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_op0[0]) = 0x03acfc5303260e80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_result[3]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_result[2]) = 0x03acfc5303260e81; +- *((unsigned long*)& __m256i_result[1]) = 
0x03af03af03af03af; +- *((unsigned long*)& __m256i_result[0]) = 0x03acfc5303260e81; +- __m256i_out = __lasx_xvbitset_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0000010100020103; +- *((unsigned long*)& __m256i_result[2]) = 0x040f040f040b236d; +- *((unsigned long*)& __m256i_result[1]) = 0x0000010100020103; +- *((unsigned long*)& __m256i_result[0]) = 0x040f040f040b236d; +- __m256i_out = __lasx_xvabsd_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op0[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op0[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[3]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_op1[2]) = 0x03acfc5303260e80; +- *((unsigned long*)& __m256i_op1[1]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_op1[0]) = 0x03acfc5303260e80; +- *((unsigned long*)& __m256i_result[3]) = 0x00000b0cfffff4f3; +- *((unsigned long*)& __m256i_result[2]) = 0x000f9bb562f56c80; +- *((unsigned long*)& __m256i_result[1]) = 0x00000b0cfffff4f3; +- *((unsigned long*)& __m256i_result[0]) = 0x000f9bb562f56c80; +- __m256i_out = __lasx_xvmulwev_d_wu_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvclo_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmaxa_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_result[0]) = 0x0000000000000000; +- __m128d_out = 
__lsx_vfmul_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128d_result, __m128d_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsle_wu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_op0[2]) = 0x03acfc5303260e81; +- *((unsigned long*)& __m256i_op0[1]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_op0[0]) = 0x03acfc5303260e81; +- *((unsigned long*)& __m256i_op1[3]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_op1[2]) = 0x03acfc5303260e81; +- *((unsigned long*)& __m256i_op1[1]) = 0x03af03af03af03af; +- *((unsigned long*)& __m256i_op1[0]) = 0x03acfc5303260e81; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvsrarni_h_w(__m256i_op0,__m256i_op1,0x1b); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubi_du(__m128i_op0,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_qu_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x1716151417161514; +- *((unsigned long*)& __m256d_op0[2]) = 0x1716151417161514; +- *((unsigned long*)& __m256d_op0[1]) = 0x1716151417161514; +- *((unsigned long*)& __m256d_op0[0]) = 0x1716151417161514; +- *((unsigned long*)& __m256d_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[2]) = 0x0000000000002780; +- *((unsigned long*)& __m256d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[0]) = 0x0000000000002780; +- *((unsigned long*)& __m256d_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[2]) = 0x0000000000002780; +- *((unsigned long*)& __m256d_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op2[0]) = 0x0000000000002780; +- *((unsigned long*)& __m256d_result[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[2]) = 0x8000000000002780; +- *((unsigned 
long*)& __m256d_result[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256d_result[0]) = 0x8000000000002780; +- __m256d_out = __lasx_xvfnmadd_d(__m256d_op0,__m256d_op1,__m256d_op2); +- ASSERTEQ_64(__LINE__, __m256d_result, __m256d_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwev_q_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vreplvei_h(__m128i_op0,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmin_du(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x8a8a8a8a8a8a8a8a; +- *((unsigned long*)& __m128i_result[0]) = 0x8a8a8a8a8a8a8a8a; +- __m128i_out = __lsx_vori_b(__m128i_op0,0x8a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x8a8a8a8a8a8a8a8a; +- *((unsigned long*)& __m128i_op1[0]) = 0x8a8a8a8a8a8a8a8a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_wu_d(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x8a8a8a8a; +- *((int*)& __m128_op1[2]) = 0x8a8a8a8a; +- *((int*)& __m128_op1[1]) = 0x8a8a8a8a; +- *((int*)& __m128_op1[0]) = 0x8a8a8a8a; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_caf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vslei_h(__m128i_op0,-10); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& 
__m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vfcmp_saf_s(__m128_op0,__m128_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m256_op0[7]) = 0x00000000; +- *((int*)& __m256_op0[6]) = 0x00000000; +- *((int*)& __m256_op0[5]) = 0x00000000; +- *((int*)& __m256_op0[4]) = 0x00000000; +- *((int*)& __m256_op0[3]) = 0x00000000; +- *((int*)& __m256_op0[2]) = 0x00000000; +- *((int*)& __m256_op0[1]) = 0x00000000; +- *((int*)& __m256_op0[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrnel_l_s(__m256_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op1[3]) = 0x1716151417161514; +- *((unsigned long*)& __m256i_op1[2]) = 0x1716151417161514; +- *((unsigned long*)& __m256i_op1[1]) = 0x1716151417161514; +- *((unsigned long*)& __m256i_op1[0]) = 0x1716151417161514; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0fff0fff0fff0fff; +- __m256i_out = __lasx_xvsrln_h_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[2]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op2[0]) = 0x0000000000002780; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvmaddwod_w_h(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffff10; +- *((unsigned long*)& __m256i_result[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256i_result[2]) = 0x3fff3fff3fff3fc4; +- *((unsigned long*)& __m256i_result[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& 
__m256i_result[0]) = 0x3fff3fff3fff3fc4; +- __m256i_out = __lasx_xvsrli_h(__m256i_op0,0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000b0cfffff4f3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000f9bb562f56c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000b0cfffff4f3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000f9bb562f56c80; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op2[3]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op2[2]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_op2[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op2[0]) = 0x04f104f104f504ed; +- *((unsigned long*)& __m256i_result[3]) = 0x0018761ed60b5d7f; +- *((unsigned long*)& __m256i_result[2]) = 0xabdcdc9938afafe9; +- *((unsigned long*)& __m256i_result[1]) = 0x0018761ed60b5d7f; +- *((unsigned long*)& __m256i_result[0]) = 0xabdcdc9938afafe9; +- __m256i_out = __lasx_xvmaddwev_q_du_d(__m256i_op0,__m256i_op1,__m256i_op2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsubwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128d_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128d_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; +- __m128i_out = __lsx_vfcmp_seq_d(__m128d_op0,__m128d_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((int*)& __m128_op0[3]) = 0x00000000; +- *((int*)& __m128_op0[2]) = 0x00000000; +- *((int*)& __m128_op0[1]) = 0x00000000; +- *((int*)& __m128_op0[0]) = 0x00000000; +- *((int*)& __m128_op1[3]) = 0x00000000; +- *((int*)& __m128_op1[2]) = 0x00000000; +- *((int*)& __m128_op1[1]) = 0x00000000; +- *((int*)& __m128_op1[0]) = 0x00000000; +- *((int*)& __m128_result[3]) = 0x00000000; +- *((int*)& __m128_result[2]) = 0x00000000; +- *((int*)& __m128_result[1]) = 0x00000000; +- *((int*)& __m128_result[0]) = 0x00000000; +- __m128_out = __lsx_vfmax_s(__m128_op0,__m128_op1); +- ASSERTEQ_32(__LINE__, __m128_result, __m128_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m256d_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256d_op1[3]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256d_op1[2]) = 0x3fff3fff3fff3fc4; +- *((unsigned long*)& __m256d_op1[1]) = 0x3fff3fff3fff3fff; +- *((unsigned long*)& __m256d_op1[0]) = 0x3fff3fff3fff3fc4; +- *((int*)& __m256_result[7]) = 0x00000000; +- *((int*)& __m256_result[6]) = 0x00000000; +- *((int*)& __m256_result[5]) = 0x3ff9fffa; +- *((int*)& __m256_result[4]) = 0x3ff9fffa; +- *((int*)& __m256_result[3]) = 0x00000000; +- *((int*)& __m256_result[2]) = 0x00000000; +- *((int*)& __m256_result[1]) = 0x3ff9fffa; +- *((int*)& __m256_result[0]) = 0x3ff9fffa; +- __m256_out = __lasx_xvfcvt_s_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_32(__LINE__, __m256_result, __m256_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmuh_b(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vhaddw_w_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0fff0fff0fff0fff; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvandn_v(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000900000009; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[2]) = 0x3ff9fffa3ff9fffa; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[0]) = 0x3ff9fffa3ff9fffa; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000007ff3; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000007ff3; +- __m256i_out = __lasx_xvsrani_w_d(__m256i_op0,__m256i_op1,0x2f); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((int*)& __m256_op0[7]) = 0x003f0200; +- 
*((int*)& __m256_op0[6]) = 0x01400200; +- *((int*)& __m256_op0[5]) = 0x003f00ff; +- *((int*)& __m256_op0[4]) = 0x003f00c4; +- *((int*)& __m256_op0[3]) = 0x003f0200; +- *((int*)& __m256_op0[2]) = 0x01400200; +- *((int*)& __m256_op0[1]) = 0x003f00ff; +- *((int*)& __m256_op0[0]) = 0x003f00c4; +- *((int*)& __m256_op1[7]) = 0x00000101; +- *((int*)& __m256_op1[6]) = 0x01010101; +- *((int*)& __m256_op1[5]) = 0x00000000; +- *((int*)& __m256_op1[4]) = 0x00000000; +- *((int*)& __m256_op1[3]) = 0x00000101; +- *((int*)& __m256_op1[2]) = 0x01010101; +- *((int*)& __m256_op1[1]) = 0x00000000; +- *((int*)& __m256_op1[0]) = 0x00000000; +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvfcmp_cune_s(__m256_op0,__m256_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vaddwod_d_wu(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vbitclri_h(__m128i_op0,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vmskltz_d(__m128i_op0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffdbff980038ffaf; +- *((unsigned long*)& __m256i_op0[2]) = 0xffafffe80004fff1; +- *((unsigned long*)& __m256i_op0[1]) = 0xffdbff980038ffaf; +- *((unsigned long*)& __m256i_op0[0]) = 0xffafffe80004fff1; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000ffff0002fffc; +- *((unsigned long*)& __m256i_op1[2]) = 0xffff0000fffd0003; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffc; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000fffd0003; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_result[1]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_result[0]) = 0x0000ffff0002fffd; +- __m256i_out = __lasx_xvsigncov_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 
0x0101000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000020202020202; +- *((unsigned long*)& __m256i_result[2]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000020202020202; +- *((unsigned long*)& __m256i_result[0]) = 0x0101000000010000; +- __m256i_out = __lasx_xvadda_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x761ed60b5d7f0000; +- *((unsigned long*)& __m256i_op0[2]) = 0xdc9938afafe904f1; +- *((unsigned long*)& __m256i_op0[1]) = 0x761ed60b5d7f0000; +- *((unsigned long*)& __m256i_op0[0]) = 0xdc9938afafe904f1; +- *((unsigned long*)& __m256i_result[3]) = 0x03b0feb002eb0000; +- *((unsigned long*)& __m256i_result[2]) = 0xfee401c5fd7f0027; +- *((unsigned long*)& __m256i_result[1]) = 0x03b0feb002eb0000; +- *((unsigned long*)& __m256i_result[0]) = 0xfee401c5fd7f0027; +- __m256i_out = __lasx_xvsrai_h(__m256i_op0,0x5); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffdbff980038ffaf; +- *((unsigned long*)& __m256i_op0[2]) = 0xffafffe80004fff1; +- *((unsigned long*)& __m256i_op0[1]) = 0xffdbff980038ffaf; +- *((unsigned long*)& __m256i_op0[0]) = 0xffafffe80004fff1; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000020202020202; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000020202020202; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000e3fec0004fff1; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000e3fec0004fff1; +- __m256i_out = __lasx_xvsrlrn_w_d(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_result[3]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_result[2]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_result[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_result[0]) = 0xffff0000fffd0004; +- __m256i_out = __lasx_xvpermi_d(__m256i_op0,0xcb); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vssrarn_b_h(__m128i_op0,__m128i_op1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ff01ff68; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000070ff017de6; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ff01ff68; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000070ff017de6; +- *((unsigned long*)& __m256i_op1[3]) = 0x761ed60b5d7f0000; +- *((unsigned long*)& __m256i_op1[2]) = 0xdc9938afafe904f1; +- *((unsigned long*)& __m256i_op1[1]) = 0x761ed60b5d7f0000; +- *((unsigned long*)& __m256i_op1[0]) = 0xdc9938afafe904f1; +- *((unsigned long*)& __m256i_result[3]) = 0x00000000007f0000; +- *((unsigned long*)& 
__m256i_result[2]) = 0x00004c9000e9d886; +- *((unsigned long*)& __m256i_result[1]) = 0x00000000007f0000; +- *((unsigned long*)& __m256i_result[0]) = 0x00004c9000e9d886; +- __m256i_out = __lasx_xvmulwev_h_bu(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_op1 = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vinsgr2vr_b(__m128i_op0,int_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op0[0]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_op1[3]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_op1[2]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000ffff0002fffd; +- *((unsigned long*)& __m256i_op1[0]) = 0xffff0000fffd0004; +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffff0; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000000f; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000000f; +- __m256i_out = __lasx_xvssrani_d_q(__m256i_op0,__m256i_op1,0x6c); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_op1[3]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op1[2]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_op1[1]) = 0x0000010101010101; +- *((unsigned long*)& __m256i_op1[0]) = 0x0101000000010000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvssub_b(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256d_op0[3]) = 0x0000ffff0002fffc; +- *((unsigned long*)& __m256d_op0[2]) = 0xffff0000fffd0003; +- *((unsigned long*)& __m256d_op0[1]) = 0x0000ffff0002fffc; +- *((unsigned long*)& __m256d_op0[0]) = 0xffff0000fffd0003; +- *((unsigned long*)& __m256d_op1[3]) = 0x003f020001400200; +- *((unsigned long*)& __m256d_op1[2]) = 0x003f00ff003f00c4; +- *((unsigned long*)& __m256d_op1[1]) = 0x003f020001400200; +- *((unsigned long*)& __m256d_op1[0]) = 0x003f00ff003f00c4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvftintrm_w_d(__m256d_op0,__m256d_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned 
long*)& __m128i_op2[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op2[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vshuf_h(__m128i_op0,__m128i_op1,__m128i_op2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000260a378; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000d02317; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000260a378; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000d02317; +- *((unsigned long*)& __m256i_op1[3]) = 0x003f020001400200; +- *((unsigned long*)& __m256i_op1[2]) = 0x003f00ff003f00c4; +- *((unsigned long*)& __m256i_op1[1]) = 0x003f020001400200; +- *((unsigned long*)& __m256i_op1[0]) = 0x003f00ff003f00c4; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvseq_w(__m256i_op0,__m256i_op1); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x00a300a300a300a3; +- *((unsigned long*)& __m128i_result[0]) = 0x00a300a300a300a3; +- __m128i_out = __lsx_vrepli_h(0xa3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[1]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_result[0]) = 0x0000001000000010; +- __m256i_out = __lasx_xvldi(-4080); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- +- *((unsigned long*)& __m128i_result[1]) = 0xfffffffffffffe15; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffe15; +- __m128i_out = __lsx_vrepli_d(-491); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- +- *((unsigned long*)& __m256i_result[3]) = 0xfebcfebcfebcfebc; +- *((unsigned long*)& __m256i_result[2]) = 0xfebcfebcfebcfebc; +- *((unsigned long*)& __m256i_result[1]) = 0xfebcfebcfebcfebc; +- *((unsigned long*)& __m256i_result[0]) = 0xfebcfebcfebcfebc; +- __m256i_out = __lasx_xvrepli_h(-324); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0xecececececececec; +- *((unsigned long*)& __m128i_result[0]) = 0xecececececececec; +- __m128i_out = __lsx_vrepli_b(-20); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x00ffff00ff00ff00; +- *((unsigned long*)& __m128i_result[0]) = 0x00ffff00ff00ff00; +- __m128i_out = __lsx_vldi(-1686); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x3fd1000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x3fd1000000000000; +- __m256i_out = __lasx_xvldi(-943); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x004d004d004d004d; +- *((unsigned long*)& __m128i_result[0]) = 0x004d004d004d004d; +- __m128i_out = __lsx_vrepli_h(0x4d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[2]) = 0xff1cff1cff1cff1c; +- *((unsigned 
long*)& __m256i_result[1]) = 0xff1cff1cff1cff1c; +- *((unsigned long*)& __m256i_result[0]) = 0xff1cff1cff1cff1c; +- __m256i_out = __lasx_xvrepli_h(-228); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_result[2]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_result[1]) = 0x7200000072000000; +- *((unsigned long*)& __m256i_result[0]) = 0x7200000072000000; +- __m256i_out = __lasx_xvldi(-3214); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0xffffff1dffffff1d; +- *((unsigned long*)& __m256i_result[2]) = 0xffffff1dffffff1d; +- *((unsigned long*)& __m256i_result[1]) = 0xffffff1dffffff1d; +- *((unsigned long*)& __m256i_result[0]) = 0xffffff1dffffff1d; +- __m256i_out = __lasx_xvrepli_w(-227); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x0a0000000a000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0a0000000a000000; +- __m128i_out = __lsx_vldi(-3318); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000000000fff8; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000000000ffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff00000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000f0000000f; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000002000000020; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000f0000000f; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000808081; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000808081; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000808081; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000808081; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff1ffca0011feca; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff1ffca0011feca; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; 
+- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000080008000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000007fff7fff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000080008000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000007fff7fff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fff000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fff000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0404040404040404; +- *((unsigned long*)& __m128i_op0[0]) = 0xec68e3ef5a98ed54; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0x0080000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x195f307a5d04acbb; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xefff000100000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xf000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xefff000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xf000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xc600000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000001fc0000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0400040004000400; +- *((unsigned long*)& __m128i_op0[0]) = 0x0400040004000400; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000001000000010; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001000000010; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000010001; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 
0xffffffffff01ff01; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfffffffffffffe03; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffe03; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000006; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op0[2]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op0[1]) = 0xe1616161e1614e60; +- *((unsigned long*)& __m256i_op0[0]) = 0xe1616161e1614e60; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00005555aaabfffe; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7fff7fff7fff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff7fff; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- 
*((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfbba01c0003f7e3f; +- *((unsigned long*)& __m256i_op0[2]) = 0xffc6cc05c64d960e; +- *((unsigned long*)& __m256i_op0[1]) = 0xfbd884e7003f7e3f; +- *((unsigned long*)& __m256i_op0[0]) = 0xff874dc687870000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010183f95466; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x01010101d58efe94; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000400; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x8080808080808080; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x7f0101070101010f; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000127f010116; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xff80ff80ff80ff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xff80ff80ff80ff80; +- int_result = 
0x0000000000000001; +- int_out = __lasx_xbz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000200000002; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00010003; +- *((unsigned long*)& __m256i_op0[1]) = 0x0080000200000002; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00010003; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000002bfd9461; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& 
__m128i_op0[0]) = 0xefffdffff0009d3d; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000010000c; +- *((unsigned long*)& __m128i_op0[0]) = 0x006ffffefff0000d; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000006f00001f0a; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000958affff995d; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[2]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[1]) = 0x4040404040404040; +- *((unsigned long*)& __m256i_op0[0]) = 0x4040404040404040; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000100010001007c; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 
0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000001f0000001f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000001f0000ffff; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; +- *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ca0200000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ca0200000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[2]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m256i_op0[0]) = 0xfffffffffffffffe; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0003000300030003; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff082f000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x003f000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0002000200020002; +- *((unsigned long*)& __m128i_op0[0]) = 0x0202fe02fd020102; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x2c2c2c2c2c2c2c2c; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x2c2c2c2c2c2c2c2c; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- 
int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0a0aa9890a0ac5f3; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffd8ffc7ffdaff8a; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffd8ffc7ffdaff8a; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000b0b100015d1e; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001fffe0001bfff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000b0b100015d1e; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001fffe0001bfff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ff00fe00ff00fe; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ff00fe00ff00fe; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636363; +- *((unsigned long*)& __m128i_op0[0]) = 0x6368d2cd63636363; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x1f001f00000007ef; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00001fff200007ef; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_w(__m256i_op0); +- 
ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808081; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0038d800ff000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00fffe00fffffe00; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x8000008000008080; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080800000800080; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000002e8b164; +- *((unsigned long*)& __m128i_op0[0]) = 0x199714a038478040; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffe00029f9f6061; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x007f008000ea007f; +- *((unsigned long*)& __m128i_op0[0]) = 0x00ff00ff00ff00ff; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000001; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001fe01fe; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000ff0100; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001fe01fe; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000ff0100; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op0[2]) = 0x7fffffffa2beb040; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000022beb03f; +- *((unsigned long*)& __m256i_op0[0]) = 0x7fffffffa2beb040; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000ffffffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000ffff00000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000ffff00000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xfff10000fff10000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xfff10000fff10000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffff0020001d001f; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- 
*((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x687a8373f249bc44; +- *((unsigned long*)& __m128i_op0[0]) = 0x7861145d9241a14a; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff00000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff00000080; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffff0000ffff0000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000007fff0018; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfdfdfdfdfdfdfdfd; +- *((unsigned long*)& __m256i_op0[2]) = 0xe27fe2821d226278; +- *((unsigned long*)& __m256i_op0[1]) = 0xfdfdfdfdfdfdfdfd; +- *((unsigned long*)& __m256i_op0[0]) = 0xe27fe2821d226278; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00f7000000f70007; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00f7000000f70007; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_v(__m256i_op0); +- 
ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00ffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000ff00000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00ffffffffffffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_op0[2]) = 0x00e9a80014ff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0x00b213171dff0606; +- *((unsigned long*)& __m256i_op0[0]) = 0x00e9a80014ff0000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000ffff0000ffff; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000008; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000008; +- int_result = 0x0000000000000001; +- 
int_out = __lasx_xbz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0010001000100010; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010001000100010; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffff7ffffffffe; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fffffffe; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000004870ba0; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_d(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x7ff8000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x7ff8000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xfffffffffe00fe00; +- *((unsigned long*)& __m256i_op0[2]) = 0x000000001fe01dde; 
+- *((unsigned long*)& __m256i_op0[1]) = 0xfffffffffe00fe00; +- *((unsigned long*)& __m256i_op0[0]) = 0x000000001fe01dde; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000800000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000a0008; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000a0008; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000ffff0000ffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0001fffe0001fffe; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x000000000000ffc0; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000fff0ffc0; +- *((unsigned long*)& __m256i_op0[1]) = 0x000000000000ffc0; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000fff0ffc0; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xcd636363cd636363; +- *((unsigned long*)& __m128i_op0[0]) = 0xcd636363cd636363; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffefff80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000000000b7; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffefff80; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xff808000ff808000; +- *((unsigned long*)& __m256i_op0[2]) = 0xc3038000ff808000; +- *((unsigned long*)& __m256i_op0[1]) = 0xff808000ff808000; +- *((unsigned long*)& __m256i_op0[0]) = 0xc3038000ff808000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000202020200; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000100; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0xe593c8c4e593c8c4; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffff9727ffff9727; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffe79ffffba5f; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_w(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffff60000280; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000f64fab372db5; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffff60000280; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000f64fab372db5; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_h(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffff0000ffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffff0000ffff; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffff0000; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffff0000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000021; +- int_result = 0x0000000000000001; +- int_out = 
__lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000080801030000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000080103040000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbz_d(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x00000000000001f4; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x00000000000001f4; +- int_result = 0x0000000000000001; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_v(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000011ffee; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000dfff2; +- int_result = 0x0000000000000000; +- int_out = __lsx_bnz_b(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf784000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff784; +- int_result = 0x0000000000000000; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000180000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000100000001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000180000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000100000001; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbz_w(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xf784000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff784; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff009ff83f; +- int_result = 0x0000000000000001; +- int_out = __lsx_bnz_h(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- int_result = 0x0000000000000001; +- int_out = __lsx_bz_v(__m128i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- 
*((unsigned long*)& __m256i_op0[3]) = 0x00000b0cfffff4f3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000f9bb562f56c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000b0cfffff4f3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000f9bb562f56c80; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000b0cfffff4f3; +- *((unsigned long*)& __m256i_op0[2]) = 0x000f9bb562f56c80; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000b0cfffff4f3; +- *((unsigned long*)& __m256i_op0[0]) = 0x000f9bb562f56c80; +- int_result = 0x0000000000000000; +- int_out = __lasx_xbnz_b(__m256i_op0); +- ASSERTEQ_int(__LINE__, int_result, int_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x43ef878780000009; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x43ef878780000009; +- __m256i_out = __lasx_xvextl_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x000201220001011c; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x000201220001011c; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x000201220001011c; +- 
*((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x000201220001011c; +- __m256i_out = __lasx_xvextl_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_q_d(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[1]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0101010101010101; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010101010101; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000001010101; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000100000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& 
__m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000083f95466; +- *((unsigned long*)& __m256i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0101010100005400; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000083f95466; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0101010100005400; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffff; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000080; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 
0xffffffffffffffff; +- *((unsigned long*)& __m256i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[1]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_op0[0]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x0001000100010001; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x0001000100010001; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[1]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_op0[0]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[2]) = 0x8000000000000000; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m256i_result[0]) = 0x8000000000000000; +- __m256i_out = __lasx_xvextl_qu_du(__m256i_op0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; +- __m128i_out = __lsx_vld((unsigned long *)&__m128i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_result[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_result[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; +- __m256i_out = __lasx_xvld((unsigned long *)&__m256i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x0; +- *((unsigned long*)& __m128i_result[0]) = 0x0; +- __lsx_vst(__m128i_op0, (unsigned long *)&__m128i_result, 0x0); +- ASSERTEQ_64(__LINE__, __m128i_op0, __m128i_result); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x0; +- *((unsigned long*)& __m256i_result[2]) = 0x0; +- *((unsigned long*)& __m256i_result[1]) = 0x0; +- *((unsigned long*)& __m256i_result[0]) = 0x0; +- __lasx_xvst(__m256i_op0, (unsigned long *)&__m256i_result, 0x0); +- ASSERTEQ_64(__LINE__, __m256i_op0, 
__m256i_result); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; +- __m128i_out = __lsx_vldx((unsigned long *)&__m128i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_result[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_result[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; +- __m256i_out = __lasx_xvldx((unsigned long *)&__m256i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x0; +- *((unsigned long*)& __m128i_result[0]) = 0x0; +- __lsx_vstx(__m128i_op0, (unsigned long *)&__m128i_result, 0x0); +- ASSERTEQ_64(__LINE__, __m128i_op0, __m128i_result); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x0; +- *((unsigned long*)& __m256i_result[2]) = 0x0; +- *((unsigned long*)& __m256i_result[1]) = 0x0; +- *((unsigned long*)& __m256i_result[0]) = 0x0; +- __lasx_xvstx(__m256i_op0, (unsigned long *)&__m256i_result, 0x0); +- ASSERTEQ_64(__LINE__, __m256i_op0, __m256i_result); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0xc3c3c3c3c3c3c3c3; +- *((unsigned long*)& __m128i_result[0]) = 0xc3c3c3c3c3c3c3c3; +- __m128i_out = __lsx_vldrepl_b((unsigned long *)&__m128i_op0, 0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[2]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[1]) = 0xebebebebebebebeb; +- *((unsigned long*)& __m256i_result[0]) = 0xebebebebebebebeb; +- __m256i_out = __lasx_xvldrepl_b((unsigned long *)&__m256i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0xc31ac31ac31ac31a; +- *((unsigned long*)& __m128i_result[0]) = 0xc31ac31ac31ac31a; +- __m128i_out = __lsx_vldrepl_h((unsigned long *)&__m128i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 
0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0xfeebfeebfeebfeeb; +- *((unsigned long*)& __m256i_result[2]) = 0xfeebfeebfeebfeeb; +- *((unsigned long*)& __m256i_result[1]) = 0xfeebfeebfeebfeeb; +- *((unsigned long*)& __m256i_result[0]) = 0xfeebfeebfeebfeeb; +- __m256i_out = __lasx_xvldrepl_h((unsigned long *)&__m256i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x47a5c31a47a5c31a; +- *((unsigned long*)& __m128i_result[0]) = 0x47a5c31a47a5c31a; +- __m128i_out = __lsx_vldrepl_w((unsigned long *)&__m128i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0xad72feebad72feeb; +- *((unsigned long*)& __m256i_result[2]) = 0xad72feebad72feeb; +- *((unsigned long*)& __m256i_result[1]) = 0xad72feebad72feeb; +- *((unsigned long*)& __m256i_result[0]) = 0xad72feebad72feeb; +- __m256i_out = __lasx_xvldrepl_w((unsigned long *)&__m256i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[0]) = 0x3ab7a3fc47a5c31a; +- __m128i_out = __lsx_vldrepl_d((unsigned long *)&__m128i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[2]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[1]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[0]) = 0x0ad152a5ad72feeb; +- __m256i_out = __lasx_xvldrepl_d((unsigned long *)&__m256i_op0, 0x0); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x000001a8000001a8; +- *((unsigned long*)& __m128i_result[0]) = 0x000001a8000001a8; +- __m128i_out = __lsx_vrepli_w(424); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0x0000011300000113; +- *((unsigned long*)& __m128i_result[0]) = 0x0000011300000113; +- __m128i_out = __lsx_vrepli_w(275); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_result[1]) = 0xfffffee2fffffee2; +- *((unsigned long*)& __m128i_result[0]) = 0xfffffee2fffffee2; +- __m128i_out = __lsx_vrepli_w(-286); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x0; +- *((unsigned long*)& __m128i_result[0]) = 0x05; +- *((unsigned long*)& __m128i_out[1]) = 0x0; +- *((unsigned long*)& __m128i_out[0]) = 0x0; +- __lsx_vstelm_b(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned 
long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x0; +- *((unsigned long*)& __m128i_result[0]) = 0x5c05; +- *((unsigned long*)& __m128i_out[1]) = 0x0; +- *((unsigned long*)& __m128i_out[0]) = 0x0; +- __lsx_vstelm_h(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x0; +- *((unsigned long*)& __m128i_result[0]) = 0xc9d85c05; +- *((unsigned long*)& __m128i_out[1]) = 0x0; +- *((unsigned long*)& __m128i_out[0]) = 0x0; +- __lsx_vstelm_w(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_op0[0]) = 0x3ab7a3fc47a5c31a; +- *((unsigned long*)& __m128i_result[1]) = 0x0; +- *((unsigned long*)& __m128i_result[0]) = 0x1dcc4255c9d85c05; +- *((unsigned long*)& __m128i_out[1]) = 0x0; +- *((unsigned long*)& __m128i_out[0]) = 0x0; +- __lsx_vstelm_d(__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x0; +- *((unsigned long*)& __m256i_result[2]) = 0x0; +- *((unsigned long*)& __m256i_result[1]) = 0x0; +- *((unsigned long*)& __m256i_result[0]) = 0x8d; +- *((unsigned long*)& __m256i_out[3]) = 0x0; +- *((unsigned long*)& __m256i_out[2]) = 0x0; +- *((unsigned long*)& __m256i_out[1]) = 0x0; +- *((unsigned long*)& __m256i_out[0]) = 0x0; +- __lasx_xvstelm_b(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0xe); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x0; +- *((unsigned long*)& __m256i_result[2]) = 0x0; +- *((unsigned long*)& __m256i_result[1]) = 0x0; +- *((unsigned long*)& __m256i_result[0]) = 0x9100; +- *((unsigned long*)& __m256i_out[3]) = 0x0; +- *((unsigned long*)& __m256i_out[2]) = 0x0; +- *((unsigned long*)& __m256i_out[1]) = 0x0; +- *((unsigned long*)& __m256i_out[0]) = 0x0; +- __lasx_xvstelm_h(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x8); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x0; +- *((unsigned long*)& __m256i_result[2]) = 0x0; +- *((unsigned long*)& __m256i_result[1]) = 0x0; +- *((unsigned long*)& __m256i_result[0]) = 0xe9179100; +- *((unsigned long*)& __m256i_out[3]) = 0x0; +- *((unsigned long*)& __m256i_out[2]) = 0x0; +- *((unsigned long*)& __m256i_out[1]) = 0x0; +- *((unsigned long*)& __m256i_out[0]) = 0x0; +- __lasx_xvstelm_w(__m256i_op0, (unsigned long *)&__m256i_out, 
0x0, 0x4); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_op0[3]) = 0x042f0500cfea969a; +- *((unsigned long*)& __m256i_op0[2]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_op0[1]) = 0xa98d4f7a77c308ee; +- *((unsigned long*)& __m256i_op0[0]) = 0x0ad152a5ad72feeb; +- *((unsigned long*)& __m256i_result[3]) = 0x0; +- *((unsigned long*)& __m256i_result[2]) = 0x0; +- *((unsigned long*)& __m256i_result[1]) = 0x0; +- *((unsigned long*)& __m256i_result[0]) = 0x58569d7be9179100; +- *((unsigned long*)& __m256i_out[3]) = 0x0; +- *((unsigned long*)& __m256i_out[2]) = 0x0; +- *((unsigned long*)& __m256i_out[1]) = 0x0; +- *((unsigned long*)& __m256i_out[0]) = 0x0; +- __lasx_xvstelm_d(__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x2); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_result[2]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_result[1]) = 0x6b6b6b6b6b6b6b6b; +- *((unsigned long*)& __m256i_result[0]) = 0x6b6b6b6b6b6b6b6b; +- __m256i_out = __lasx_xvrepli_b(-149); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0xfffffffffffffe69; +- *((unsigned long*)& __m256i_result[2]) = 0xfffffffffffffe69; +- *((unsigned long*)& __m256i_result[1]) = 0xfffffffffffffe69; +- *((unsigned long*)& __m256i_result[0]) = 0xfffffffffffffe69; +- __m256i_out = __lasx_xvrepli_d(-407); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffff76; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffff76; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffff76; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffff76; +- __m256i_out = __lasx_xvrepli_d(-138); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffa1; +- *((unsigned long*)& __m256i_result[2]) = 0xffffffffffffffa1; +- *((unsigned long*)& __m256i_result[1]) = 0xffffffffffffffa1; +- *((unsigned long*)& __m256i_result[0]) = 0xffffffffffffffa1; +- __m256i_out = __lasx_xvrepli_d(-95); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x0000000000000019; +- *((unsigned long*)& __m256i_result[2]) = 0x0000000000000019; +- *((unsigned long*)& __m256i_result[1]) = 0x0000000000000019; +- *((unsigned long*)& __m256i_result[0]) = 0x0000000000000019; +- __m256i_out = __lasx_xvrepli_d(25); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x000000000000001e; +- *((unsigned long*)& __m256i_result[2]) = 0x000000000000001e; +- *((unsigned long*)& __m256i_result[1]) = 0x000000000000001e; +- *((unsigned long*)& __m256i_result[0]) = 0x000000000000001e; +- __m256i_out = __lasx_xvrepli_d(30); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m256i_result[3]) = 0x8d8d8d8d8d8d8d8d; +- *((unsigned long*)& __m256i_result[2]) = 0x8d8d8d8d8d8d8d8d; +- *((unsigned long*)& __m256i_result[1]) = 0x8d8d8d8d8d8d8d8d; +- *((unsigned long*)& __m256i_result[0]) = 0x8d8d8d8d8d8d8d8d; +- __m256i_out = __lasx_xvrepli_b(-371); +- ASSERTEQ_64(__LINE__, __m256i_result, __m256i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff00000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0xffff8969ffffd7e2; +- *((unsigned long*)& __m128i_op1[0]) = 
0x0000d688ffffbd95; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xf12dfafc1ad1f7b3; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x4000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x34); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000002000000020; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000200000002000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000200000002000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000010000000100; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000000000ff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000001000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x2f); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000c0002000c0002; +- *((unsigned long*)& __m128i_op0[0]) = 0x000400c600700153; +- *((unsigned long*)& __m128i_op1[1]) = 0x000c0002000c0002; +- *((unsigned long*)& __m128i_op1[0]) = 0x000400c600700153; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x000000010000007f; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000fffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0800000400000800; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000001515151500; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001515151500; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- 
*((unsigned long*)& __m128i_result[0]) = 0x0001515000015150; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000fdfd0404; +- *((unsigned long*)& __m128i_op1[1]) = 0x3fffffff3fffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0x3fffffff3fffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x000000000000fc08; +- *((unsigned long*)& __m128i_result[0]) = 0x8000800080008000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000000000fc08; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000800080008000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffba420000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x000007e044000400; +- *((unsigned long*)& __m128i_result[0]) = 0xfdd2100000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x25); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000081e003f3f3f; +- *((unsigned long*)& __m128i_op0[0]) = 0x3f3f3f0e00000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000081e003f3f3f; +- *((unsigned long*)& __m128i_op1[0]) = 0x3f3f3f0e00000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00000103c007e7e8; +- *((unsigned long*)& __m128i_result[0]) = 0x00000103c007e7e8; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x43); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0202022302023212; +- *((unsigned long*)& __m128i_op0[0]) = 0x0202ff3f02022212; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000002100003010; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000ff3f00002010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x79); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x1a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffff7fff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xe2bb5ff00e20aceb; +- *((unsigned long*)& __m128i_op1[0]) = 0xe2bb5ff00e20aceb; +- *((unsigned long*)& __m128i_result[1]) = 0x0100010000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x00e3000e00e3000e; +- __m128i_out = 
__lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xf58df7841423142a; +- *((unsigned long*)& __m128i_op1[0]) = 0x3f7477f8ff4e2152; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x3d3e0505101e4008; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x2bd5d429e34a1efb; +- *((unsigned long*)& __m128i_op0[0]) = 0xfc0203fccbedbba7; +- *((unsigned long*)& __m128i_op1[1]) = 0xc9f66947f077afd0; +- *((unsigned long*)& __m128i_op1[0]) = 0x89fed7c07fdf5d00; +- *((unsigned long*)& __m128i_result[1]) = 0x14f1a50ffe65f6de; +- *((unsigned long*)& __m128i_result[0]) = 0xa3f83bd8e03fefaf; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x6ed694e00e0355db; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000010600000106; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0xe00e035606000001; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xe739e7ade77ae725; +- *((unsigned long*)& __m128i_op0[0]) = 0xbb9013bd049bc9ec; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x56aca41400000000; +- *((unsigned long*)& __m128i_result[1]) = 0x7ade77ae3bd049bd; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000041400000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x1010101010101010; +- *((unsigned long*)& __m128i_op1[0]) = 0x1010101010101010; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x8081808180818081; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000000006ff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0037f80000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x15); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x69); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0020202020202020; +- *((unsigned long*)& __m128i_op0[0]) = 0x0080808080c04040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0101010101010101; +- *((unsigned long*)& __m128i_op1[0]) = 0x0101010001808080; +- *((unsigned long*)& __m128i_result[1]) = 0x0000202000008081; +- *((unsigned long*)& __m128i_result[0]) = 0x0001010100010101; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x28); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xfff0000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0010000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x00fff00000001000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x28); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x6b); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000adf0000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001e00; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0040000000400040; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000020002020; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808102; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000001010102; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x7); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x001000100010000b; +- *((unsigned long*)& __m128i_op0[0]) = 0x03fc03fc03fc03fc; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x04000400ff01ff01; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xa); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x1010101010101010; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000fff800000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000000001ed68; +- *((unsigned long*)& __m128i_op1[1]) = 0x1ff6a09e667f3bd8; +- *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffffffe; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000007b5a; +- *((unsigned long*)& __m128i_result[0]) = 0x999fcef600000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffe5c8000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x91f80badc162a0c4; +- *((unsigned long*)& __m128i_op1[0]) = 0x99d1ffff0101ff01; +- *((unsigned long*)& __m128i_result[1]) = 0x00ff400000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x905d0b06cf0008f8; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x3802f4fd025800f7; +- 
*((unsigned long*)& __m128i_op1[1]) = 0xc8ff0bffff00ffae; +- *((unsigned long*)& __m128i_op1[0]) = 0x91ff40fffff8ff50; +- *((unsigned long*)& __m128i_result[1]) = 0x0000200000000700; +- *((unsigned long*)& __m128i_result[0]) = 0x0000192000001240; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x33); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff0ffd0ffd; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffff0ffc0001; +- *((unsigned long*)& __m128i_op1[1]) = 0xbb7743ca4c78461f; +- *((unsigned long*)& __m128i_op1[0]) = 0xd9743eb5fb4deb3a; +- *((unsigned long*)& __m128i_result[1]) = 0x003fffffffc3ff44; +- *((unsigned long*)& __m128i_result[0]) = 0x002eddd0f2931e12; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x4a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xbb7743ca4c78461f; +- *((unsigned long*)& __m128i_op0[0]) = 0xd9743eb5fb4deb3a; +- *((unsigned long*)& __m128i_op1[1]) = 0x22445e1ad9c3e4f0; +- *((unsigned long*)& __m128i_op1[0]) = 0x1b43e8a30a570a63; +- *((unsigned long*)& __m128i_result[1]) = 0x743ca4c843eb5fb5; +- *((unsigned long*)& __m128i_result[0]) = 0x45e1ad9c3e8a30a5; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x14); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x1204900f62f72565; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x4901725600000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x4); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x6a); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000400000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x12); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000300000003; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x32); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& 
__m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x2); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x3f3f3f7fbf3fffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x47); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000040804080; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000020100000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xe); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffe8ffff28fc; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffffffa; +- *((unsigned long*)& __m128i_op1[1]) = 0x00007fff0000803e; +- *((unsigned long*)& __m128i_op1[0]) = 0x00000006ffff81e1; +- *((unsigned long*)& __m128i_result[1]) = 0x0ffffffe8ffff290; +- *((unsigned long*)& __m128i_result[0]) = 0x000007fff0000804; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x44); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000418200000008e; +- *((unsigned long*)& __m128i_result[1]) = 
0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000002100047; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x1); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x6363636363636362; +- *((unsigned long*)& __m128i_op0[0]) = 0x6363636363636362; +- *((unsigned long*)& __m128i_op1[1]) = 0x6363636363636362; +- *((unsigned long*)& __m128i_op1[0]) = 0x6363636363636362; +- *((unsigned long*)& __m128i_result[1]) = 0x0032003200320032; +- *((unsigned long*)& __m128i_result[0]) = 0x0032003200320032; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x19); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffff01010102; +- *((unsigned long*)& __m128i_op0[0]) = 0x7ffdf87f0b0c7f7f; +- *((unsigned long*)& __m128i_op1[1]) = 0xf6b3eb63f6b3f6b3; +- *((unsigned long*)& __m128i_op1[0]) = 0x363953e42b56432e; +- *((unsigned long*)& __m128i_result[1]) = 0x010000010080000b; +- *((unsigned long*)& __m128i_result[0]) = 0x00f700f70036002b; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x18); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xed67d6c7ed67ed67; +- *((unsigned long*)& __m128i_op1[0]) = 0x6c72a7c856ac865c; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000700000003; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x3d); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xffffffffff40ff83; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x1010101010101010; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xc); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000003030103; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000003030103; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000006060; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000006060; +- 
__m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000002408beb26c8; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x000000000000706e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000028c27; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000070; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x8); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x80000b0b80000b0b; +- *((unsigned long*)& __m128i_op0[0]) = 0x8000101080001010; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffefefffffeff0; +- *((unsigned long*)& __m128i_result[1]) = 0x0061006100020002; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000fe00fe; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x3); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000078087f08; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000078087f08; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000e0fc0000e0fc; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0x6); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ff0bff76; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x75); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x33); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x000000ff00ff0000; +- *((unsigned long*)& __m128i_op0[0]) = 0x000000ff00ffffff; +- *((unsigned long*)& __m128i_op1[1]) = 0x8282828282828282; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000828282828282; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0008000800000008; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x1c); +- 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00f7000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000005150; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000005150; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000000f7000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x24); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x41afddcb1c000000; +- *((unsigned long*)& __m128i_op1[1]) = 0xd09e1bd99a2c6eb1; +- *((unsigned long*)& __m128i_op1[0]) = 0xe82f7c27bb0778af; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000040002; +- *((unsigned long*)& __m128i_result[0]) = 0x000d000a000f000c; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x1c); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff8000; +- *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffdff0; +- *((unsigned long*)& __m128i_op1[1]) = 0x8000000080000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0144329880000000; +- *((unsigned long*)& __m128i_result[1]) = 0x007fffc0007ffff0; +- *((unsigned long*)& __m128i_result[0]) = 0x004000004c400000; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x9); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x17); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000001e0000001e; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xd); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op0[0]) = 0xfffafff0fff9ff01; +- *((unsigned long*)& __m128i_op1[1]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000200000002; +- *((unsigned long*)& __m128i_result[1]) = 0x00000000d800cff8; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; +- __m128i_out = __lsx_vsrlrni_h_w(__m128i_op0,__m128i_op1,0x5); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000001; +- *((unsigned long*)& __m128i_op1[1]) = 0x00000002000007d7; +- *((unsigned long*)& __m128i_op1[0]) = 0x0000000300000ff1; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000001; +- *((unsigned long*)& __m128i_result[0]) = 0x000007d700000ff1; +- __m128i_out = __lsx_vsrlrni_w_d(__m128i_op0,__m128i_op1,0x0); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& 
__m128i_op0[1]) = 0xff800000ff800000; +- *((unsigned long*)& __m128i_op0[0]) = 0x7fc000007fc00000; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffff00ffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffff00ffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000ff8; +- *((unsigned long*)& __m128i_result[0]) = 0x0000000000001000; +- __m128i_out = __lsx_vsrlrni_d_q(__m128i_op0,__m128i_op1,0x74); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; +- *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000f08; +- *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; +- *((unsigned long*)& __m128i_result[1]) = 0x0000000000000002; +- *((unsigned long*)& __m128i_result[0]) = 0x2020202020202020; +- __m128i_out = __lsx_vsrlrni_b_h(__m128i_op0,__m128i_op1,0xb); +- ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); +- +- return 0; +-} +diff --git a/gcc/testsuite/gcc.target/loongarch/larch-builtin.c b/gcc/testsuite/gcc.target/loongarch/larch-builtin.c +new file mode 100644 +index 000000000..ca7ddb140 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/larch-builtin.c +@@ -0,0 +1,265 @@ ++/* Test for LoongArch intrinsics. */ ++ ++/* { dg-do compile } */ ++ ++/* { dg-final { scan-assembler-times "test_rdtime_d:.*rdtime\\.d.*\\.size test_rdtime_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_rdtimeh_w:.*rdtimeh\\.w.*\\.size test_rdtimeh_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_rdtimel_w:.*rdtimel\\.w.*\\.size test_rdtimel_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_movfcsr2gr:.*movfcsr2gr.*\\.size test_movfcsr2gr" 1 } } */ ++/* { dg-final { scan-assembler-times "test_movgr2fcsr:.*movgr2fcsr.*\\.size test_movgr2fcsr" 1 } } */ ++/* { dg-final { scan-assembler-times "test_cacop_d:.*cacop.*\\.size test_cacop_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_cpucfg:.*cpucfg.*\\.size test_cpucfg" 1 } } */ ++/* { dg-final { scan-assembler-times "test_asrtle_d:.*asrtle\\.d.*\\.size test_asrtle_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_asrtgt_d:.*asrtgt\\.d.*\\.size test_asrtgt_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_lddir_d:.*lddir.*\\.size test_lddir_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_ldpte_d:.*ldpte.*\\.size test_ldpte_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_crc_w_b_w:.*crc\\.w\\.b\\.w.*\\.size test_crc_w_b_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_crc_w_h_w:.*crc\\.w\\.h\\.w.*\\.size test_crc_w_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_crc_w_w_w:.*crc\\.w\\.w\\.w.*\\.size test_crc_w_w_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_crc_w_d_w:.*crc\\.w\\.d\\.w.*\\.size test_crc_w_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_crcc_w_b_w:.*crcc\\.w\\.b\\.w.*\\.size test_crcc_w_b_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_crcc_w_h_w:.*crcc\\.w\\.h\\.w.*\\.size test_crcc_w_h_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_crcc_w_w_w:.*crcc\\.w\\.w\\.w.*\\.size test_crcc_w_w_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_crcc_w_d_w:.*crcc\\.w\\.d\\.w.*\\.size test_crcc_w_d_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_csrrd_w:.*csrrd.*\\.size test_csrrd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_csrwr_w:.*csrwr.*\\.size test_csrwr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_csrxchg_w:.*csrxchg.*\\.size test_csrxchg_w" 1 } } */ 
++/* { dg-final { scan-assembler-times "test_csrrd_d:.*csrrd.*\\.size test_csrrd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_csrwr_d:.*csrwr.*\\.size test_csrwr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_csrxchg_d:.*csrxchg.*\\.size test_csrxchg_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_iocsrrd_b:.*iocsrrd\\.b.*\\.size test_iocsrrd_b" 1 } } */ ++/* { dg-final { scan-assembler-times "test_iocsrrd_h:.*iocsrrd\\.h.*\\.size test_iocsrrd_h" 1 } } */ ++/* { dg-final { scan-assembler-times "test_iocsrrd_w:.*iocsrrd\\.w.*\\.size test_iocsrrd_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_iocsrrd_d:.*iocsrrd\\.d.*\\.size test_iocsrrd_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_iocsrwr_b:.*iocsrwr\\.b.*\\.size test_iocsrwr_b" 1 } } */ ++/* { dg-final { scan-assembler-times "test_iocsrwr_h:.*iocsrwr\\.h.*\\.size test_iocsrwr_h" 1 } } */ ++/* { dg-final { scan-assembler-times "test_iocsrwr_w:.*iocsrwr\\.w.*\\.size test_iocsrwr_w" 1 } } */ ++/* { dg-final { scan-assembler-times "test_iocsrwr_d:.*iocsrwr\\.d.*\\.size test_iocsrwr_d" 1 } } */ ++/* { dg-final { scan-assembler-times "test_dbar:.*dbar.*\\.size test_dbar" 1 } } */ ++/* { dg-final { scan-assembler-times "test_ibar:.*ibar.*\\.size test_ibar" 1 } } */ ++/* { dg-final { scan-assembler-times "test_syscall:.*syscall.*\\.size test_syscall" 1 } } */ ++/* { dg-final { scan-assembler-times "test_break:.*break.*\\.size test_break" 1 } } */ ++ ++#include ++ ++__drdtime_t ++test_rdtime_d () ++{ ++ return __rdtime_d (); ++} ++ ++__rdtime_t ++test_rdtimeh_w () ++{ ++ return __rdtimeh_w (); ++} ++ ++__rdtime_t ++test_rdtimel_w () ++{ ++ return __rdtimel_w (); ++} ++ ++unsigned int ++test_movfcsr2gr () ++{ ++ return __movfcsr2gr (1); ++} ++ ++void ++test_movgr2fcsr (unsigned int _1) ++{ ++ __movgr2fcsr (1, _1); ++} ++ ++void ++test_cacop_d (unsigned long int _1) ++{ ++ __cacop_d (1, _1, 1); ++} ++ ++unsigned int ++test_cpucfg (unsigned int _1) ++{ ++ return __cpucfg (_1); ++} ++ ++void ++test_asrtle_d (long int _1, long int _2) ++{ ++ __asrtle_d (_1, _2); ++} ++ ++void ++test_asrtgt_d (long int _1, long int _2) ++{ ++ __asrtgt_d (_1, _2); ++} ++ ++long int ++test_lddir_d (long int _1) ++{ ++ return __lddir_d (_1, 1); ++} ++ ++void ++test_ldpte_d (long int _1) ++{ ++ __ldpte_d (_1, 1); ++} ++ ++int ++test_crc_w_b_w (char _1, int _2) ++{ ++ return __crc_w_b_w (_1, _2); ++} ++ ++int ++test_crc_w_h_w (short _1, int _2) ++{ ++ return __crc_w_h_w (_1, _2); ++} ++ ++int ++test_crc_w_w_w (int _1, int _2) ++{ ++ return __crc_w_w_w (_1, _2); ++} ++ ++int ++test_crc_w_d_w (long int _1, int _2) ++{ ++ return __crc_w_d_w (_1, _2); ++} ++ ++int ++test_crcc_w_b_w (char _1, int _2) ++{ ++ return __crcc_w_b_w (_1, _2); ++} ++ ++int ++test_crcc_w_h_w (short _1, int _2) ++{ ++ return __crcc_w_h_w (_1, _2); ++} ++ ++int ++test_crcc_w_w_w (int _1, int _2) ++{ ++ return __crcc_w_w_w (_1, _2); ++} ++ ++int ++test_crcc_w_d_w (long int _1, int _2) ++{ ++ return __crcc_w_d_w (_1, _2); ++} ++ ++unsigned int ++test_csrrd_w () ++{ ++ return __csrrd_w (1); ++} ++ ++unsigned int ++test_csrwr_w (unsigned int _1) ++{ ++ return __csrwr_w (_1, 1); ++} ++ ++unsigned int ++test_csrxchg_w (unsigned int _1, unsigned int _2) ++{ ++ return __csrxchg_w (_1, _2, 1); ++} ++ ++unsigned long int ++test_csrrd_d () ++{ ++ return __csrrd_d (1); ++} ++ ++unsigned long int ++test_csrwr_d (unsigned long int _1) ++{ ++ return __csrwr_d (_1, 1); ++} ++ ++unsigned long int ++test_csrxchg_d (unsigned long int _1, unsigned long int _2) ++{ ++ return 
__csrxchg_d (_1, _2, 1); ++} ++ ++unsigned char ++test_iocsrrd_b (unsigned int _1) ++{ ++ return __iocsrrd_b (_1); ++} ++ ++unsigned char ++test_iocsrrd_h (unsigned int _1) ++{ ++ return __iocsrrd_h (_1); ++} ++ ++unsigned int ++test_iocsrrd_w (unsigned int _1) ++{ ++ return __iocsrrd_w (_1); ++} ++ ++unsigned long int ++test_iocsrrd_d (unsigned int _1) ++{ ++ return __iocsrrd_d (_1); ++} ++ ++void ++test_iocsrwr_b (unsigned char _1, unsigned int _2) ++{ ++ __iocsrwr_b (_1, _2); ++} ++ ++void ++test_iocsrwr_h (unsigned short _1, unsigned int _2) ++{ ++ __iocsrwr_h (_1, _2); ++} ++ ++void ++test_iocsrwr_w (unsigned int _1, unsigned int _2) ++{ ++ __iocsrwr_w (_1, _2); ++} ++ ++void ++test_iocsrwr_d (unsigned long int _1, unsigned int _2) ++{ ++ __iocsrwr_d (_1, _2); ++} ++ ++void ++test_dbar () ++{ ++ __dbar (1); ++} ++ ++void ++test_ibar () ++{ ++ __ibar (1); ++} ++ ++void ++test_syscall () ++{ ++ __syscall (1); ++} ++ ++void ++test_break () ++{ ++ __break (1); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/loongarch.exp b/gcc/testsuite/gcc.target/loongarch/loongarch.exp +index be9543d38..bebc00047 100644 +--- a/gcc/testsuite/gcc.target/loongarch/loongarch.exp ++++ b/gcc/testsuite/gcc.target/loongarch/loongarch.exp +@@ -1,4 +1,4 @@ +-# Copyright (C) 2017-2018 Free Software Foundation, Inc. ++# Copyright (C) 2020-2022 Free Software Foundation, Inc. + + # This program is free software; you can redistribute it and/or modify + # it under the terms of the GNU General Public License as published by +diff --git a/gcc/testsuite/gcc.target/loongarch/math-float-128.c b/gcc/testsuite/gcc.target/loongarch/math-float-128.c +new file mode 100644 +index 000000000..387566a57 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/math-float-128.c +@@ -0,0 +1,81 @@ ++/* { dg-do compile } */ ++/* { dg-options " -march=loongarch64 -O2 " } */ ++/* { dg-final { scan-assembler-not "my_fabsq2:.*\\bl\t%plt\\(__builtin_fabsq\\).*my_fabsq2" } } */ ++/* { dg-final { scan-assembler-not "my_copysignq2:.*\\bl\t%plt\\(__builtin_copysignq\\).*my_copysignq2" } } */ ++/* { dg-final { scan-assembler-not "my_infq2:.*\\bl\t%plt\\(__builtin_infq\\).*my_infq2" } } */ ++/* { dg-final { scan-assembler-not "my_huge_valq2:.*\\bl\t%plt\\(__builtin_huge_valq\\).*my_huge_valq2" } } */ ++/* { dg-final { scan-assembler-not "my_nanq2:.*\\bl\t%plt\\(__builtin_nanq\\).*my_nanq2" } } */ ++/* { dg-final { scan-assembler-not "my_nansq2:.*\\bl\t%plt\\(__builtin_nansq\\).*my_nansq2" } } */ ++ ++__float128 ++my_fabsq1 (__float128 a) ++{ ++ return __builtin_fabsq (a); ++} ++ ++_Float128 ++my_fabsq2 (_Float128 a) ++{ ++ return __builtin_fabsq (a); ++} ++ ++__float128 ++my_copysignq1 (__float128 a, __float128 b) ++{ ++ return __builtin_copysignq (a, b); ++} ++ ++_Float128 ++my_copysignq2 (_Float128 a, _Float128 b) ++{ ++ return __builtin_copysignq (a, b); ++} ++ ++__float128 ++my_infq1 (void) ++{ ++ return __builtin_infq (); ++} ++ ++_Float128 ++my_infq2 (void) ++{ ++ return __builtin_infq (); ++} ++ ++__float128 ++my_huge_valq1 (void) ++{ ++ return __builtin_huge_valq (); ++} ++ ++_Float128 ++my_huge_valq2 (void) ++{ ++ return __builtin_huge_valq (); ++} ++ ++__float128 ++my_nanq1 (void) ++{ ++ return __builtin_nanq (""); ++} ++ ++_Float128 ++my_nanq2 (void) ++{ ++ return __builtin_nanq (""); ++} ++ ++__float128 ++my_nansq1 (void) ++{ ++ return __builtin_nansq (""); ++} ++ ++_Float128 ++my_nansq2 (void) ++{ ++ return __builtin_nansq (""); ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-inline-lasx-strict-align.c 
b/gcc/testsuite/gcc.target/loongarch/memcpy-inline-lasx-strict-align.c +new file mode 100644 +index 000000000..a7405d6cf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-inline-lasx-strict-align.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx -mstrict-align" } */ ++/* { dg-final { scan-assembler-not "xvld" } } */ ++ ++#include <string.h> ++ ++void mycpy(int *a, int *b) { ++ memcpy(a, b, 256); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-inline-lasx.c b/gcc/testsuite/gcc.target/loongarch/memcpy-inline-lasx.c +new file mode 100644 +index 000000000..10b7ef13f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-inline-lasx.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mno-strict-align -mlasx" } */ ++/* { dg-final { scan-assembler "xvld" } } */ ++ ++#include <string.h> ++ ++void mycpy(int *a, int *b) { ++ memcpy(a, b, 256); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-inline-noalign.c b/gcc/testsuite/gcc.target/loongarch/memcpy-inline-noalign.c +new file mode 100644 +index 000000000..8fe589cc0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-inline-noalign.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3" } */ ++/* { dg-final { scan-assembler-not "ld.bu" } } */ ++ ++#include <string.h> ++ ++void mycpy(int *a, int *b) { ++ memcpy(a, b, 256); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-inline-strict-align.c b/gcc/testsuite/gcc.target/loongarch/memcpy-inline-strict-align.c +new file mode 100644 +index 000000000..8696825ab +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-inline-strict-align.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mstrict-align" } */ ++/* { dg-final { scan-assembler "ld.bu" } } */ ++ ++#include <string.h> ++ ++void mycpy(int *a, int *b) { ++ memcpy(a, b, 256); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/mulh.c b/gcc/testsuite/gcc.target/loongarch/mulh.c +index 08760219b..bef35828d 100644 +--- a/gcc/testsuite/gcc.target/loongarch/mulh.c ++++ b/gcc/testsuite/gcc.target/loongarch/mulh.c +@@ -10,3 +10,4 @@ f (SI x, SI y) + { + return ((DI) x * y) >> 32; + } ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/mulw_d.c b/gcc/testsuite/gcc.target/loongarch/mulw_d.c +index 04696adb4..db8c0d867 100644 +--- a/gcc/testsuite/gcc.target/loongarch/mulw_d.c ++++ b/gcc/testsuite/gcc.target/loongarch/mulw_d.c +@@ -10,3 +10,4 @@ f (SI x, SI y) + { + return (DI) x * y; + } ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/pr106459.c b/gcc/testsuite/gcc.target/loongarch/pr106459.c +new file mode 100644 +index 000000000..eb737dc49 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr106459.c +@@ -0,0 +1,13 @@ ++/* { dg-do compile } */ ++ ++/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106459 */ ++ ++typedef unsigned int UDItype __attribute__((mode(DI))); ++int foo(UDItype x) { ++ x = x & (((UDItype)(((UDItype)(((UDItype)0x0F << 8) | 0x0F) << (2 * 8)) | ++ (((UDItype)0x0F << 8) | 0x0F)) ++ << (4 * 8)) | ++ (((UDItype)(((UDItype)0x0F << 8) | 0x0F) << (2 * 8)) | ++ (((UDItype)0x0F << 8) | 0x0F))); ++ return x; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/pr112476-3.c b/gcc/testsuite/gcc.target/loongarch/pr112476-3.c +new file mode 100644 +index 000000000..d696d4182 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr112476-3.c +@@ -0,0 +1,58 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlsx" } */ ++ ++#include <stdint.h> ++ ++typedef int8_t orc_int8; ++typedef int16_t orc_int16; ++typedef int32_t orc_int32; ++typedef 
int64_t orc_int64; ++ ++typedef union ++{ ++ orc_int32 i; ++ float f; ++ orc_int16 x2[2]; ++ orc_int8 x4[4]; ++} orc_union32; ++typedef union ++{ ++ orc_int64 i; ++ double f; ++ orc_int32 x2[2]; ++ float x2f[2]; ++ orc_int16 x4[4]; ++} orc_union64; ++ ++void ++audio_orc_s32_to_double (double * restrict d1, ++ const signed int * restrict s1, int n) ++{ ++ int i; ++ orc_union64 *restrict ptr0; ++ const orc_union32 *restrict ptr4; ++ orc_union32 var33; ++ orc_union64 var34; ++ orc_union64 var35; ++ orc_union64 var36; ++ ++ ptr0 = (orc_union64 *) d1; ++ ptr4 = (orc_union32 *) s1; ++ ++ var34.i = 0x41e0000000000000UL; ++ ++ for (i = 0; i < n; i++) { ++ var33 = ptr4[i]; ++ var36.f = var33.i; ++ { ++ orc_union64 _src1; ++ orc_union64 _src2; ++ orc_union64 _dest1; ++ _src1.i = ((var36.i) & ((((var36.i)&0x7ff0000000000000UL) == 0) ? 0xfff0000000000000UL : 0xffffffffffffffffUL)); ++ _src2.i = ((var34.i) & ((((var34.i)&0x7ff0000000000000UL) == 0) ? 0xfff0000000000000UL : 0xffffffffffffffffUL)); ++ _dest1.f = _src1.f / _src2.f; ++ var35.i = ((_dest1.i) & ((((_dest1.i)&0x7ff0000000000000UL) == 0) ? 0xfff0000000000000UL : 0xffffffffffffffffUL)); ++ } ++ ptr0[i] = var35; ++ } ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/pr112476-4.c b/gcc/testsuite/gcc.target/loongarch/pr112476-4.c +new file mode 100644 +index 000000000..955d98552 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr112476-4.c +@@ -0,0 +1,4 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++ ++#include "pr112476-3.c" +diff --git a/gcc/testsuite/gcc.target/loongarch/prolog-opt.c b/gcc/testsuite/gcc.target/loongarch/prolog-opt.c +new file mode 100644 +index 000000000..676ce80bb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/prolog-opt.c +@@ -0,0 +1,14 @@ ++/* Test that LoongArch backend stack drop operation optimized. 
*/ ++ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mabi=lp64d" } */ ++/* { dg-final { scan-assembler "addi.d\t\\\$r3,\\\$r3,-16" } } */ ++ ++extern int printf (char *, ...); ++ ++int main() ++{ ++ char buf[1024 * 12]; ++ printf ("%p\n", buf); ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/recip_sqrt.c b/gcc/testsuite/gcc.target/loongarch/recip_sqrt.c +new file mode 100644 +index 000000000..ac1a8f177 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/recip_sqrt.c +@@ -0,0 +1,11 @@ ++/* { dg-do compile } */ ++/* { dg-options "-Ofast -fdump-tree-optimized" } */ ++ ++float ++foo (float a) ++{ ++ float tmp = 1.0f / __builtin_sqrtf (a); ++ return tmp; ++} ++ ++/* { dg-final { scan-tree-dump-not " / " "optimized" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-realign.c b/gcc/testsuite/gcc.target/loongarch/stack-realign.c +new file mode 100644 +index 000000000..17fe22d91 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-realign.c +@@ -0,0 +1,34 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mlasx -O -mstackrealign" } */ ++ ++extern void abort (void); ++ ++typedef double __m256 __attribute__ ((__vector_size__ (32), __may_alias__)); ++ ++static __m256 ++load_m256 (double *e) ++{ ++ return * (__m256 *) e; ++} ++ ++typedef union ++{ ++ __m256 x; ++ double a[4]; ++} union256; ++ ++void test (void) ++{ ++ union256 u; ++ double e[4] __attribute__ ((aligned (32))) ++ = {2134.3343, 1234.635654, 1.2234, 876.8976}; ++ int i; ++ ++ u.x = load_m256 (e); ++ ++ for (i = 0; i < 4; i++) ++ if (u.a[i] != e[i]) ++ abort (); ++} ++ ++/* { dg-final { scan-assembler "bstrins.d\t\\\$r3,\\\$r0,4,0" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/stack-usage-realign.c b/gcc/testsuite/gcc.target/loongarch/stack-usage-realign.c +new file mode 100644 +index 000000000..666d84276 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/stack-usage-realign.c +@@ -0,0 +1,19 @@ ++/* { dg-do compile } */ ++/* { dg-options "-fstack-usage -mlasx -mforce-drap -mstackrealign" } */ ++ ++typedef int __attribute__((vector_size(32))) vec; ++ ++vec foo (vec v) ++{ ++ return v; ++} ++ ++int main (void) ++{ ++ vec V; ++ V = foo (V); ++ return 0; ++} ++ ++/* { dg-final { scan-stack-usage "main\t96\tdynamic,bounded" } } */ ++/* { dg-final { cleanup-stack-usage } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/strict-align.c b/gcc/testsuite/gcc.target/loongarch/strict-align.c +new file mode 100644 +index 000000000..bcad2b84f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/strict-align.c +@@ -0,0 +1,13 @@ ++/* { dg-do compile } */ ++/* { dg-options "-Ofast -mstrict-align -mlasx" } */ ++/* { dg-final { scan-assembler-not "vfadd.s" } } */ ++ ++void ++foo (float* restrict x, float* restrict y) ++{ ++ x[0] = x[0] + y[0]; ++ x[1] = x[1] + y[1]; ++ x[2] = x[2] + y[2]; ++ x[3] = x[3] + y[3]; ++} ++ +diff --git a/gcc/testsuite/gcc.target/loongarch/vec-unpack.c b/gcc/testsuite/gcc.target/loongarch/vec-unpack.c +index 3e0f5bb92..a7fa86519 100644 +--- a/gcc/testsuite/gcc.target/loongarch/vec-unpack.c ++++ b/gcc/testsuite/gcc.target/loongarch/vec-unpack.c +@@ -3,16 +3,16 @@ + /* { dg-final { scan-assembler-times "xvpermi.d" 2} } */ + /* { dg-final { scan-assembler-times "xvfcvtl.d.s" 2} } */ + /* { dg-final { scan-assembler-times "xvfcvth.d.s" 2} } */ +- ++ + #define N 16 +-float f[N]; +-double d[N]; +-int n[N]; +- ++float f[N]; ++double d[N]; ++int n[N]; ++ + __attribute__((noinline)) void +-foo (void) +-{ +- int i; ++foo (void) ++{ ++ int i; + for (i = 0; i < N; i++) + d[i] 
= f[i]; + } +diff --git a/gcc/testsuite/gcc.target/loongarch/vec_initv32qiv16qi.c b/gcc/testsuite/gcc.target/loongarch/vec_initv32qiv16qi.c +index bc1ca7a08..84e57645a 100644 +--- a/gcc/testsuite/gcc.target/loongarch/vec_initv32qiv16qi.c ++++ b/gcc/testsuite/gcc.target/loongarch/vec_initv32qiv16qi.c +@@ -5,15 +5,15 @@ typedef unsigned char uint8_t; + + int + test_func (uint8_t *pix1, int i_stride_pix1, +- uint8_t *pix2, int i_stride_pix2) ++ uint8_t *pix2, int i_stride_pix2) + { + int i_sum = 0; + for (int y = 0; y < 16; y++) + { + for (int x = 0; x < 16; x++) +- { ++ { + i_sum += __builtin_abs (pix1[x] - pix2[x]); +- } ++ } + pix1 += i_stride_pix1; + pix2 += i_stride_pix2; + } +diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c +new file mode 100644 +index 000000000..b56ef5825 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c +@@ -0,0 +1,21 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mlsx -fno-fp-int-builtin-inexact" } */ ++ ++#include "vect-frint-scalar.c" ++ ++/* cannot use LSX for these with -fno-fp-int-builtin-inexact, ++ call library function. */ ++/* { dg-final { scan-assembler "\t%plt\\(ceil\\)" } } */ ++/* { dg-final { scan-assembler "\t%plt\\(ceilf\\)" } } */ ++/* { dg-final { scan-assembler "\t%plt\\(floor\\)" } } */ ++/* { dg-final { scan-assembler "\t%plt\\(floorf\\)" } } */ ++/* { dg-final { scan-assembler "\t%plt\\(trunc\\)" } } */ ++/* { dg-final { scan-assembler "\t%plt\\(truncf\\)" } } */ ++ ++/* nearbyint is not allowed to rasie FE_INEXACT for decades */ ++/* { dg-final { scan-assembler "\t%plt\\(nearbyint\\)" } } */ ++/* { dg-final { scan-assembler "\t%plt\\(nearbyintf\\)" } } */ ++ ++/* rint should just use basic FP operation */ ++/* { dg-final { scan-assembler "\tfrint\.s" } } */ ++/* { dg-final { scan-assembler "\tfrint\.d" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c +new file mode 100644 +index 000000000..e566fbccc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c +@@ -0,0 +1,39 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mlsx" } */ ++ ++#define test(func, suffix) \ ++__typeof__ (1.##suffix) \ ++_##func##suffix (__typeof__ (1.##suffix) x) \ ++{ \ ++ return __builtin_##func##suffix (x); \ ++} ++ ++test (ceil, f) ++test (ceil, ) ++test (floor, f) ++test (floor, ) ++test (trunc, f) ++test (trunc, ) ++test (nearbyint, f) ++test (nearbyint, ) ++test (rint, f) ++test (rint, ) ++ ++/* { dg-final { scan-assembler "\tvfrintrp\.s" } } */ ++/* { dg-final { scan-assembler "\tvfrintrm\.s" } } */ ++/* { dg-final { scan-assembler "\tvfrintrz\.s" } } */ ++/* { dg-final { scan-assembler "\tvfrintrp\.d" } } */ ++/* { dg-final { scan-assembler "\tvfrintrm\.d" } } */ ++/* { dg-final { scan-assembler "\tvfrintrz\.d" } } */ ++ ++/* must do vreplvei first */ ++/* { dg-final { scan-assembler-times "\tvreplvei\.w\t\\\$vr0,\\\$vr0,0" 3 } } */ ++/* { dg-final { scan-assembler-times "\tvreplvei\.d\t\\\$vr0,\\\$vr0,0" 3 } } */ ++ ++/* nearbyint is not allowed to rasie FE_INEXACT for decades */ ++/* { dg-final { scan-assembler "\t%plt\\(nearbyint\\)" } } */ ++/* { dg-final { scan-assembler "\t%plt\\(nearbyintf\\)" } } */ ++ ++/* rint should just use basic FP operation */ ++/* { dg-final { scan-assembler "\tfrint\.s" } } */ ++/* { dg-final { scan-assembler "\tfrint\.d" } } */ +diff --git 
a/gcc/testsuite/gcc.target/loongarch/lasx-builtin.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c +similarity index 51% +rename from gcc/testsuite/gcc.target/loongarch/lasx-builtin.c +rename to gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c +index 1f563ec81..b1a903b4a 100644 +--- a/gcc/testsuite/gcc.target/loongarch/lasx-builtin.c ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c +@@ -738,772 +738,3723 @@ + /* { dg-final { scan-assembler-times "lasx_xvrepli_h:.*xvrepli\\.h.*lasx_xvrepli_h" 1 } } */ + /* { dg-final { scan-assembler-times "lasx_xvrepli_w:.*xvrepli\\.w.*lasx_xvrepli_w" 1 } } */ + +-typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32))); +-typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1))); +-typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32))); +-typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1))); +-typedef short v16i16 __attribute__ ((vector_size(32), aligned(32))); +-typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2))); +-typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32))); +-typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2))); +-typedef int v8i32 __attribute__ ((vector_size(32), aligned(32))); +-typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4))); +-typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32))); +-typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4))); +-typedef long long v4i64 __attribute__ ((vector_size(32), aligned(32))); +-typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8))); +-typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32))); +-typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8))); +-typedef float v8f32 __attribute__ ((vector_size(32), aligned(32))); +-typedef float v8f32_w __attribute__ ((vector_size(32), aligned(4))); +-typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); +-typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); +- +-typedef double v4f64 __attribute__ ((vector_size(32), aligned(32))); +-typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8))); ++typedef signed char v32i8 __attribute__ ((vector_size (32), aligned (32))); ++typedef signed char v32i8_b __attribute__ ((vector_size (32), aligned (1))); ++typedef unsigned char v32u8 __attribute__ ((vector_size (32), aligned (32))); ++typedef unsigned char v32u8_b __attribute__ ((vector_size (32), aligned (1))); ++typedef short v16i16 __attribute__ ((vector_size (32), aligned (32))); ++typedef short v16i16_h __attribute__ ((vector_size (32), aligned (2))); ++typedef unsigned short v16u16 __attribute__ ((vector_size (32), aligned (32))); ++typedef unsigned short v16u16_h ++ __attribute__ ((vector_size (32), aligned (2))); ++typedef int v8i32 __attribute__ ((vector_size (32), aligned (32))); ++typedef int v8i32_w __attribute__ ((vector_size (32), aligned (4))); ++typedef unsigned int v8u32 __attribute__ ((vector_size (32), aligned (32))); ++typedef unsigned int v8u32_w __attribute__ ((vector_size (32), aligned (4))); ++typedef long long v4i64 __attribute__ ((vector_size (32), aligned (32))); ++typedef long long v4i64_d __attribute__ ((vector_size (32), aligned (8))); ++typedef unsigned long long v4u64 ++ __attribute__ ((vector_size (32), aligned (32))); ++typedef unsigned long long v4u64_d ++ __attribute__ ((vector_size (32), 
aligned (8))); ++typedef float v8f32 __attribute__ ((vector_size (32), aligned (32))); ++typedef float v8f32_w __attribute__ ((vector_size (32), aligned (4))); ++typedef double v4f64 __attribute__ ((vector_size (32), aligned (32))); ++typedef double v4f64_d __attribute__ ((vector_size (32), aligned (8))); ++ ++typedef double v4f64 __attribute__ ((vector_size (32), aligned (32))); ++typedef double v4f64_d __attribute__ ((vector_size (32), aligned (8))); + + typedef float __m256 __attribute__ ((__vector_size__ (32), __may_alias__)); +-typedef long long __m256i __attribute__ ((__vector_size__ (32), __may_alias__)); ++typedef long long __m256i ++ __attribute__ ((__vector_size__ (32), __may_alias__)); + typedef double __m256d __attribute__ ((__vector_size__ (32), __may_alias__)); +- ++ + /* Unaligned version of the same types. */ +-typedef float __m256_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); +-typedef long long __m256i_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); +-typedef double __m256d_u __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++typedef float __m256_u ++ __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++typedef long long __m256i_u ++ __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); ++typedef double __m256d_u ++ __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1))); + +-v32i8 __lasx_xvsll_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsll_b(_1, _2);} +-v16i16 __lasx_xvsll_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsll_h(_1, _2);} +-v8i32 __lasx_xvsll_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsll_w(_1, _2);} +-v4i64 __lasx_xvsll_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsll_d(_1, _2);} +-v32i8 __lasx_xvslli_b(v32i8 _1){return __builtin_lasx_xvslli_b(_1, 1);} +-v16i16 __lasx_xvslli_h(v16i16 _1){return __builtin_lasx_xvslli_h(_1, 1);} +-v8i32 __lasx_xvslli_w(v8i32 _1){return __builtin_lasx_xvslli_w(_1, 1);} +-v4i64 __lasx_xvslli_d(v4i64 _1){return __builtin_lasx_xvslli_d(_1, 1);} +-v32i8 __lasx_xvsra_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsra_b(_1, _2);} +-v16i16 __lasx_xvsra_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsra_h(_1, _2);} +-v8i32 __lasx_xvsra_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsra_w(_1, _2);} +-v4i64 __lasx_xvsra_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsra_d(_1, _2);} +-v32i8 __lasx_xvsrai_b(v32i8 _1){return __builtin_lasx_xvsrai_b(_1, 1);} +-v16i16 __lasx_xvsrai_h(v16i16 _1){return __builtin_lasx_xvsrai_h(_1, 1);} +-v8i32 __lasx_xvsrai_w(v8i32 _1){return __builtin_lasx_xvsrai_w(_1, 1);} +-v4i64 __lasx_xvsrai_d(v4i64 _1){return __builtin_lasx_xvsrai_d(_1, 1);} +-v32i8 __lasx_xvsrar_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrar_b(_1, _2);} +-v16i16 __lasx_xvsrar_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrar_h(_1, _2);} +-v8i32 __lasx_xvsrar_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrar_w(_1, _2);} +-v4i64 __lasx_xvsrar_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrar_d(_1, _2);} +-v32i8 __lasx_xvsrari_b(v32i8 _1){return __builtin_lasx_xvsrari_b(_1, 1);} +-v16i16 __lasx_xvsrari_h(v16i16 _1){return __builtin_lasx_xvsrari_h(_1, 1);} +-v8i32 __lasx_xvsrari_w(v8i32 _1){return __builtin_lasx_xvsrari_w(_1, 1);} +-v4i64 __lasx_xvsrari_d(v4i64 _1){return __builtin_lasx_xvsrari_d(_1, 1);} +-v32i8 __lasx_xvsrl_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrl_b(_1, _2);} +-v16i16 __lasx_xvsrl_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrl_h(_1, _2);} +-v8i32 
__lasx_xvsrl_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrl_w(_1, _2);} +-v4i64 __lasx_xvsrl_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrl_d(_1, _2);} +-v32i8 __lasx_xvsrli_b(v32i8 _1){return __builtin_lasx_xvsrli_b(_1, 1);} +-v16i16 __lasx_xvsrli_h(v16i16 _1){return __builtin_lasx_xvsrli_h(_1, 1);} +-v8i32 __lasx_xvsrli_w(v8i32 _1){return __builtin_lasx_xvsrli_w(_1, 1);} +-v4i64 __lasx_xvsrli_d(v4i64 _1){return __builtin_lasx_xvsrli_d(_1, 1);} +-v32i8 __lasx_xvsrlr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlr_b(_1, _2);} +-v16i16 __lasx_xvsrlr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlr_h(_1, _2);} +-v8i32 __lasx_xvsrlr_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlr_w(_1, _2);} +-v4i64 __lasx_xvsrlr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlr_d(_1, _2);} +-v32i8 __lasx_xvsrlri_b(v32i8 _1){return __builtin_lasx_xvsrlri_b(_1, 1);} +-v16i16 __lasx_xvsrlri_h(v16i16 _1){return __builtin_lasx_xvsrlri_h(_1, 1);} +-v8i32 __lasx_xvsrlri_w(v8i32 _1){return __builtin_lasx_xvsrlri_w(_1, 1);} +-v4i64 __lasx_xvsrlri_d(v4i64 _1){return __builtin_lasx_xvsrlri_d(_1, 1);} +-v32u8 __lasx_xvbitclr_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitclr_b(_1, _2);} +-v16u16 __lasx_xvbitclr_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitclr_h(_1, _2);} +-v8u32 __lasx_xvbitclr_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitclr_w(_1, _2);} +-v4u64 __lasx_xvbitclr_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitclr_d(_1, _2);} +-v32u8 __lasx_xvbitclri_b(v32u8 _1){return __builtin_lasx_xvbitclri_b(_1, 1);} +-v16u16 __lasx_xvbitclri_h(v16u16 _1){return __builtin_lasx_xvbitclri_h(_1, 1);} +-v8u32 __lasx_xvbitclri_w(v8u32 _1){return __builtin_lasx_xvbitclri_w(_1, 1);} +-v4u64 __lasx_xvbitclri_d(v4u64 _1){return __builtin_lasx_xvbitclri_d(_1, 1);} +-v32u8 __lasx_xvbitset_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitset_b(_1, _2);} +-v16u16 __lasx_xvbitset_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitset_h(_1, _2);} +-v8u32 __lasx_xvbitset_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitset_w(_1, _2);} +-v4u64 __lasx_xvbitset_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitset_d(_1, _2);} +-v32u8 __lasx_xvbitseti_b(v32u8 _1){return __builtin_lasx_xvbitseti_b(_1, 1);} +-v16u16 __lasx_xvbitseti_h(v16u16 _1){return __builtin_lasx_xvbitseti_h(_1, 1);} +-v8u32 __lasx_xvbitseti_w(v8u32 _1){return __builtin_lasx_xvbitseti_w(_1, 1);} +-v4u64 __lasx_xvbitseti_d(v4u64 _1){return __builtin_lasx_xvbitseti_d(_1, 1);} +-v32u8 __lasx_xvbitrev_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitrev_b(_1, _2);} +-v16u16 __lasx_xvbitrev_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvbitrev_h(_1, _2);} +-v8u32 __lasx_xvbitrev_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvbitrev_w(_1, _2);} +-v4u64 __lasx_xvbitrev_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvbitrev_d(_1, _2);} +-v32u8 __lasx_xvbitrevi_b(v32u8 _1){return __builtin_lasx_xvbitrevi_b(_1, 1);} +-v16u16 __lasx_xvbitrevi_h(v16u16 _1){return __builtin_lasx_xvbitrevi_h(_1, 1);} +-v8u32 __lasx_xvbitrevi_w(v8u32 _1){return __builtin_lasx_xvbitrevi_w(_1, 1);} +-v4u64 __lasx_xvbitrevi_d(v4u64 _1){return __builtin_lasx_xvbitrevi_d(_1, 1);} +-v32i8 __lasx_xvadd_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvadd_b(_1, _2);} +-v16i16 __lasx_xvadd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvadd_h(_1, _2);} +-v8i32 __lasx_xvadd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvadd_w(_1, _2);} +-v4i64 __lasx_xvadd_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadd_d(_1, _2);} +-v32i8 __lasx_xvaddi_bu(v32i8 _1){return __builtin_lasx_xvaddi_bu(_1, 
1);} +-v16i16 __lasx_xvaddi_hu(v16i16 _1){return __builtin_lasx_xvaddi_hu(_1, 1);} +-v8i32 __lasx_xvaddi_wu(v8i32 _1){return __builtin_lasx_xvaddi_wu(_1, 1);} +-v4i64 __lasx_xvaddi_du(v4i64 _1){return __builtin_lasx_xvaddi_du(_1, 1);} +-v32i8 __lasx_xvsub_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsub_b(_1, _2);} +-v16i16 __lasx_xvsub_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsub_h(_1, _2);} +-v8i32 __lasx_xvsub_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsub_w(_1, _2);} +-v4i64 __lasx_xvsub_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsub_d(_1, _2);} +-v32i8 __lasx_xvsubi_bu(v32i8 _1){return __builtin_lasx_xvsubi_bu(_1, 1);} +-v16i16 __lasx_xvsubi_hu(v16i16 _1){return __builtin_lasx_xvsubi_hu(_1, 1);} +-v8i32 __lasx_xvsubi_wu(v8i32 _1){return __builtin_lasx_xvsubi_wu(_1, 1);} +-v4i64 __lasx_xvsubi_du(v4i64 _1){return __builtin_lasx_xvsubi_du(_1, 1);} +-v32i8 __lasx_xvmax_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmax_b(_1, _2);} +-v16i16 __lasx_xvmax_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmax_h(_1, _2);} +-v8i32 __lasx_xvmax_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmax_w(_1, _2);} +-v4i64 __lasx_xvmax_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmax_d(_1, _2);} +-v32i8 __lasx_xvmaxi_b(v32i8 _1){return __builtin_lasx_xvmaxi_b(_1, 1);} +-v16i16 __lasx_xvmaxi_h(v16i16 _1){return __builtin_lasx_xvmaxi_h(_1, 1);} +-v8i32 __lasx_xvmaxi_w(v8i32 _1){return __builtin_lasx_xvmaxi_w(_1, 1);} +-v4i64 __lasx_xvmaxi_d(v4i64 _1){return __builtin_lasx_xvmaxi_d(_1, 1);} +-v32u8 __lasx_xvmax_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmax_bu(_1, _2);} +-v16u16 __lasx_xvmax_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmax_hu(_1, _2);} +-v8u32 __lasx_xvmax_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmax_wu(_1, _2);} +-v4u64 __lasx_xvmax_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmax_du(_1, _2);} +-v32u8 __lasx_xvmaxi_bu(v32u8 _1){return __builtin_lasx_xvmaxi_bu(_1, 1);} +-v16u16 __lasx_xvmaxi_hu(v16u16 _1){return __builtin_lasx_xvmaxi_hu(_1, 1);} +-v8u32 __lasx_xvmaxi_wu(v8u32 _1){return __builtin_lasx_xvmaxi_wu(_1, 1);} +-v4u64 __lasx_xvmaxi_du(v4u64 _1){return __builtin_lasx_xvmaxi_du(_1, 1);} +-v32i8 __lasx_xvmin_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmin_b(_1, _2);} +-v16i16 __lasx_xvmin_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmin_h(_1, _2);} +-v8i32 __lasx_xvmin_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmin_w(_1, _2);} +-v4i64 __lasx_xvmin_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmin_d(_1, _2);} +-v32i8 __lasx_xvmini_b(v32i8 _1){return __builtin_lasx_xvmini_b(_1, 1);} +-v16i16 __lasx_xvmini_h(v16i16 _1){return __builtin_lasx_xvmini_h(_1, 1);} +-v8i32 __lasx_xvmini_w(v8i32 _1){return __builtin_lasx_xvmini_w(_1, 1);} +-v4i64 __lasx_xvmini_d(v4i64 _1){return __builtin_lasx_xvmini_d(_1, 1);} +-v32u8 __lasx_xvmin_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmin_bu(_1, _2);} +-v16u16 __lasx_xvmin_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmin_hu(_1, _2);} +-v8u32 __lasx_xvmin_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmin_wu(_1, _2);} +-v4u64 __lasx_xvmin_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmin_du(_1, _2);} +-v32u8 __lasx_xvmini_bu(v32u8 _1){return __builtin_lasx_xvmini_bu(_1, 1);} +-v16u16 __lasx_xvmini_hu(v16u16 _1){return __builtin_lasx_xvmini_hu(_1, 1);} +-v8u32 __lasx_xvmini_wu(v8u32 _1){return __builtin_lasx_xvmini_wu(_1, 1);} +-v4u64 __lasx_xvmini_du(v4u64 _1){return __builtin_lasx_xvmini_du(_1, 1);} +-v32i8 __lasx_xvseq_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvseq_b(_1, _2);} +-v16i16 __lasx_xvseq_h(v16i16 
_1, v16i16 _2){return __builtin_lasx_xvseq_h(_1, _2);} +-v8i32 __lasx_xvseq_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvseq_w(_1, _2);} +-v4i64 __lasx_xvseq_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvseq_d(_1, _2);} +-v32i8 __lasx_xvseqi_b(v32i8 _1){return __builtin_lasx_xvseqi_b(_1, 1);} +-v16i16 __lasx_xvseqi_h(v16i16 _1){return __builtin_lasx_xvseqi_h(_1, 1);} +-v8i32 __lasx_xvseqi_w(v8i32 _1){return __builtin_lasx_xvseqi_w(_1, 1);} +-v4i64 __lasx_xvseqi_d(v4i64 _1){return __builtin_lasx_xvseqi_d(_1, 1);} +-v32i8 __lasx_xvslt_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvslt_b(_1, _2);} +-v16i16 __lasx_xvslt_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvslt_h(_1, _2);} +-v8i32 __lasx_xvslt_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvslt_w(_1, _2);} +-v4i64 __lasx_xvslt_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvslt_d(_1, _2);} +-v32i8 __lasx_xvslti_b(v32i8 _1){return __builtin_lasx_xvslti_b(_1, 1);} +-v16i16 __lasx_xvslti_h(v16i16 _1){return __builtin_lasx_xvslti_h(_1, 1);} +-v8i32 __lasx_xvslti_w(v8i32 _1){return __builtin_lasx_xvslti_w(_1, 1);} +-v4i64 __lasx_xvslti_d(v4i64 _1){return __builtin_lasx_xvslti_d(_1, 1);} +-v32i8 __lasx_xvslt_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvslt_bu(_1, _2);} +-v16i16 __lasx_xvslt_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvslt_hu(_1, _2);} +-v8i32 __lasx_xvslt_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvslt_wu(_1, _2);} +-v4i64 __lasx_xvslt_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvslt_du(_1, _2);} +-v32i8 __lasx_xvslti_bu(v32u8 _1){return __builtin_lasx_xvslti_bu(_1, 1);} +-v16i16 __lasx_xvslti_hu(v16u16 _1){return __builtin_lasx_xvslti_hu(_1, 1);} +-v8i32 __lasx_xvslti_wu(v8u32 _1){return __builtin_lasx_xvslti_wu(_1, 1);} +-v4i64 __lasx_xvslti_du(v4u64 _1){return __builtin_lasx_xvslti_du(_1, 1);} +-v32i8 __lasx_xvsle_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsle_b(_1, _2);} +-v16i16 __lasx_xvsle_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsle_h(_1, _2);} +-v8i32 __lasx_xvsle_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsle_w(_1, _2);} +-v4i64 __lasx_xvsle_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsle_d(_1, _2);} +-v32i8 __lasx_xvslei_b(v32i8 _1){return __builtin_lasx_xvslei_b(_1, 1);} +-v16i16 __lasx_xvslei_h(v16i16 _1){return __builtin_lasx_xvslei_h(_1, 1);} +-v8i32 __lasx_xvslei_w(v8i32 _1){return __builtin_lasx_xvslei_w(_1, 1);} +-v4i64 __lasx_xvslei_d(v4i64 _1){return __builtin_lasx_xvslei_d(_1, 1);} +-v32i8 __lasx_xvsle_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsle_bu(_1, _2);} +-v16i16 __lasx_xvsle_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsle_hu(_1, _2);} +-v8i32 __lasx_xvsle_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsle_wu(_1, _2);} +-v4i64 __lasx_xvsle_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsle_du(_1, _2);} +-v32i8 __lasx_xvslei_bu(v32u8 _1){return __builtin_lasx_xvslei_bu(_1, 1);} +-v16i16 __lasx_xvslei_hu(v16u16 _1){return __builtin_lasx_xvslei_hu(_1, 1);} +-v8i32 __lasx_xvslei_wu(v8u32 _1){return __builtin_lasx_xvslei_wu(_1, 1);} +-v4i64 __lasx_xvslei_du(v4u64 _1){return __builtin_lasx_xvslei_du(_1, 1);} +-v32i8 __lasx_xvsat_b(v32i8 _1){return __builtin_lasx_xvsat_b(_1, 1);} +-v16i16 __lasx_xvsat_h(v16i16 _1){return __builtin_lasx_xvsat_h(_1, 1);} +-v8i32 __lasx_xvsat_w(v8i32 _1){return __builtin_lasx_xvsat_w(_1, 1);} +-v4i64 __lasx_xvsat_d(v4i64 _1){return __builtin_lasx_xvsat_d(_1, 1);} +-v32u8 __lasx_xvsat_bu(v32u8 _1){return __builtin_lasx_xvsat_bu(_1, 1);} +-v16u16 __lasx_xvsat_hu(v16u16 _1){return __builtin_lasx_xvsat_hu(_1, 1);} +-v8u32 __lasx_xvsat_wu(v8u32 
_1){return __builtin_lasx_xvsat_wu(_1, 1);} +-v4u64 __lasx_xvsat_du(v4u64 _1){return __builtin_lasx_xvsat_du(_1, 1);} +-v32i8 __lasx_xvadda_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvadda_b(_1, _2);} +-v16i16 __lasx_xvadda_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvadda_h(_1, _2);} +-v8i32 __lasx_xvadda_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvadda_w(_1, _2);} +-v4i64 __lasx_xvadda_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadda_d(_1, _2);} +-v32i8 __lasx_xvsadd_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsadd_b(_1, _2);} +-v16i16 __lasx_xvsadd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsadd_h(_1, _2);} +-v8i32 __lasx_xvsadd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsadd_w(_1, _2);} +-v4i64 __lasx_xvsadd_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsadd_d(_1, _2);} +-v32u8 __lasx_xvsadd_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsadd_bu(_1, _2);} +-v16u16 __lasx_xvsadd_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsadd_hu(_1, _2);} +-v8u32 __lasx_xvsadd_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsadd_wu(_1, _2);} +-v4u64 __lasx_xvsadd_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsadd_du(_1, _2);} +-v32i8 __lasx_xvavg_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvavg_b(_1, _2);} +-v16i16 __lasx_xvavg_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvavg_h(_1, _2);} +-v8i32 __lasx_xvavg_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvavg_w(_1, _2);} +-v4i64 __lasx_xvavg_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvavg_d(_1, _2);} +-v32u8 __lasx_xvavg_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvavg_bu(_1, _2);} +-v16u16 __lasx_xvavg_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvavg_hu(_1, _2);} +-v8u32 __lasx_xvavg_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvavg_wu(_1, _2);} +-v4u64 __lasx_xvavg_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvavg_du(_1, _2);} +-v32i8 __lasx_xvavgr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvavgr_b(_1, _2);} +-v16i16 __lasx_xvavgr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvavgr_h(_1, _2);} +-v8i32 __lasx_xvavgr_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvavgr_w(_1, _2);} +-v4i64 __lasx_xvavgr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvavgr_d(_1, _2);} +-v32u8 __lasx_xvavgr_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvavgr_bu(_1, _2);} +-v16u16 __lasx_xvavgr_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvavgr_hu(_1, _2);} +-v8u32 __lasx_xvavgr_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvavgr_wu(_1, _2);} +-v4u64 __lasx_xvavgr_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvavgr_du(_1, _2);} +-v32i8 __lasx_xvssub_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssub_b(_1, _2);} +-v16i16 __lasx_xvssub_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssub_h(_1, _2);} +-v8i32 __lasx_xvssub_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssub_w(_1, _2);} +-v4i64 __lasx_xvssub_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssub_d(_1, _2);} +-v32u8 __lasx_xvssub_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvssub_bu(_1, _2);} +-v16u16 __lasx_xvssub_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssub_hu(_1, _2);} +-v8u32 __lasx_xvssub_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssub_wu(_1, _2);} +-v4u64 __lasx_xvssub_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssub_du(_1, _2);} +-v32i8 __lasx_xvabsd_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvabsd_b(_1, _2);} +-v16i16 __lasx_xvabsd_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvabsd_h(_1, _2);} +-v8i32 __lasx_xvabsd_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvabsd_w(_1, _2);} +-v4i64 __lasx_xvabsd_d(v4i64 _1, v4i64 _2){return 
__builtin_lasx_xvabsd_d(_1, _2);} +-v32u8 __lasx_xvabsd_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvabsd_bu(_1, _2);} +-v16u16 __lasx_xvabsd_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvabsd_hu(_1, _2);} +-v8u32 __lasx_xvabsd_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvabsd_wu(_1, _2);} +-v4u64 __lasx_xvabsd_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvabsd_du(_1, _2);} +-v32i8 __lasx_xvmul_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmul_b(_1, _2);} +-v16i16 __lasx_xvmul_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmul_h(_1, _2);} +-v8i32 __lasx_xvmul_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmul_w(_1, _2);} +-v4i64 __lasx_xvmul_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmul_d(_1, _2);} +-v32i8 __lasx_xvmadd_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmadd_b(_1, _2, _3);} +-v16i16 __lasx_xvmadd_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmadd_h(_1, _2, _3);} +-v8i32 __lasx_xvmadd_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmadd_w(_1, _2, _3);} +-v4i64 __lasx_xvmadd_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmadd_d(_1, _2, _3);} +-v32i8 __lasx_xvmsub_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmsub_b(_1, _2, _3);} +-v16i16 __lasx_xvmsub_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmsub_h(_1, _2, _3);} +-v8i32 __lasx_xvmsub_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmsub_w(_1, _2, _3);} +-v4i64 __lasx_xvmsub_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmsub_d(_1, _2, _3);} +-v32i8 __lasx_xvdiv_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvdiv_b(_1, _2);} +-v16i16 __lasx_xvdiv_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvdiv_h(_1, _2);} +-v8i32 __lasx_xvdiv_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvdiv_w(_1, _2);} +-v4i64 __lasx_xvdiv_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvdiv_d(_1, _2);} +-v32u8 __lasx_xvdiv_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvdiv_bu(_1, _2);} +-v16u16 __lasx_xvdiv_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvdiv_hu(_1, _2);} +-v8u32 __lasx_xvdiv_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvdiv_wu(_1, _2);} +-v4u64 __lasx_xvdiv_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvdiv_du(_1, _2);} +-v16i16 __lasx_xvhaddw_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvhaddw_h_b(_1, _2);} +-v8i32 __lasx_xvhaddw_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvhaddw_w_h(_1, _2);} +-v4i64 __lasx_xvhaddw_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvhaddw_d_w(_1, _2);} +-v16u16 __lasx_xvhaddw_hu_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvhaddw_hu_bu(_1, _2);} +-v8u32 __lasx_xvhaddw_wu_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvhaddw_wu_hu(_1, _2);} +-v4u64 __lasx_xvhaddw_du_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvhaddw_du_wu(_1, _2);} +-v16i16 __lasx_xvhsubw_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvhsubw_h_b(_1, _2);} +-v8i32 __lasx_xvhsubw_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvhsubw_w_h(_1, _2);} +-v4i64 __lasx_xvhsubw_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvhsubw_d_w(_1, _2);} +-v16i16 __lasx_xvhsubw_hu_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvhsubw_hu_bu(_1, _2);} +-v8i32 __lasx_xvhsubw_wu_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvhsubw_wu_hu(_1, _2);} +-v4i64 __lasx_xvhsubw_du_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvhsubw_du_wu(_1, _2);} +-v32i8 __lasx_xvmod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmod_b(_1, _2);} +-v16i16 __lasx_xvmod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmod_h(_1, _2);} +-v8i32 __lasx_xvmod_w(v8i32 _1, v8i32 _2){return 
__builtin_lasx_xvmod_w(_1, _2);} +-v4i64 __lasx_xvmod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmod_d(_1, _2);} +-v32u8 __lasx_xvmod_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmod_bu(_1, _2);} +-v16u16 __lasx_xvmod_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmod_hu(_1, _2);} +-v8u32 __lasx_xvmod_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmod_wu(_1, _2);} +-v4u64 __lasx_xvmod_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmod_du(_1, _2);} +-v32i8 __lasx_xvrepl128vei_b(v32i8 _1){return __builtin_lasx_xvrepl128vei_b(_1, 1);} +-v16i16 __lasx_xvrepl128vei_h(v16i16 _1){return __builtin_lasx_xvrepl128vei_h(_1, 1);} +-v8i32 __lasx_xvrepl128vei_w(v8i32 _1){return __builtin_lasx_xvrepl128vei_w(_1, 1);} +-v4i64 __lasx_xvrepl128vei_d(v4i64 _1){return __builtin_lasx_xvrepl128vei_d(_1, 1);} +-v32i8 __lasx_xvpickev_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpickev_b(_1, _2);} +-v16i16 __lasx_xvpickev_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpickev_h(_1, _2);} +-v8i32 __lasx_xvpickev_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpickev_w(_1, _2);} +-v4i64 __lasx_xvpickev_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpickev_d(_1, _2);} +-v32i8 __lasx_xvpickod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpickod_b(_1, _2);} +-v16i16 __lasx_xvpickod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpickod_h(_1, _2);} +-v8i32 __lasx_xvpickod_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpickod_w(_1, _2);} +-v4i64 __lasx_xvpickod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpickod_d(_1, _2);} +-v32i8 __lasx_xvilvh_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvilvh_b(_1, _2);} +-v16i16 __lasx_xvilvh_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvilvh_h(_1, _2);} +-v8i32 __lasx_xvilvh_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvilvh_w(_1, _2);} +-v4i64 __lasx_xvilvh_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvilvh_d(_1, _2);} +-v32i8 __lasx_xvilvl_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvilvl_b(_1, _2);} +-v16i16 __lasx_xvilvl_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvilvl_h(_1, _2);} +-v8i32 __lasx_xvilvl_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvilvl_w(_1, _2);} +-v4i64 __lasx_xvilvl_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvilvl_d(_1, _2);} +-v32i8 __lasx_xvpackev_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpackev_b(_1, _2);} +-v16i16 __lasx_xvpackev_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpackev_h(_1, _2);} +-v8i32 __lasx_xvpackev_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpackev_w(_1, _2);} +-v4i64 __lasx_xvpackev_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpackev_d(_1, _2);} +-v32i8 __lasx_xvpackod_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpackod_b(_1, _2);} +-v16i16 __lasx_xvpackod_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvpackod_h(_1, _2);} +-v8i32 __lasx_xvpackod_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpackod_w(_1, _2);} +-v4i64 __lasx_xvpackod_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvpackod_d(_1, _2);} +-v32i8 __lasx_xvshuf_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvshuf_b(_1, _2, _3);} +-v16i16 __lasx_xvshuf_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvshuf_h(_1, _2, _3);} +-v8i32 __lasx_xvshuf_w(v8i32 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvshuf_w(_1, _2, _3);} +-v4i64 __lasx_xvshuf_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvshuf_d(_1, _2, _3);} +-v32u8 __lasx_xvand_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvand_v(_1, _2);} +-v32u8 __lasx_xvandi_b(v32u8 _1){return __builtin_lasx_xvandi_b(_1, 1);} +-v32u8 __lasx_xvor_v(v32u8 _1, v32u8 _2){return 
__builtin_lasx_xvor_v(_1, _2);} +-v32u8 __lasx_xvori_b(v32u8 _1){return __builtin_lasx_xvori_b(_1, 1);} +-v32u8 __lasx_xvnor_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvnor_v(_1, _2);} +-v32u8 __lasx_xvnori_b(v32u8 _1){return __builtin_lasx_xvnori_b(_1, 1);} +-v32u8 __lasx_xvxor_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvxor_v(_1, _2);} +-v32u8 __lasx_xvxori_b(v32u8 _1){return __builtin_lasx_xvxori_b(_1, 1);} +-v32u8 __lasx_xvbitsel_v(v32u8 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvbitsel_v(_1, _2, _3);} +-v32u8 __lasx_xvbitseli_b(v32u8 _1, v32u8 _2){return __builtin_lasx_xvbitseli_b(_1, _2, 1);} +-v32i8 __lasx_xvshuf4i_b(v32i8 _1){return __builtin_lasx_xvshuf4i_b(_1, 1);} +-v16i16 __lasx_xvshuf4i_h(v16i16 _1){return __builtin_lasx_xvshuf4i_h(_1, 1);} +-v8i32 __lasx_xvshuf4i_w(v8i32 _1){return __builtin_lasx_xvshuf4i_w(_1, 1);} +-v32i8 __lasx_xvreplgr2vr_b(int _1){return __builtin_lasx_xvreplgr2vr_b(_1);} +-v16i16 __lasx_xvreplgr2vr_h(int _1){return __builtin_lasx_xvreplgr2vr_h(_1);} +-v8i32 __lasx_xvreplgr2vr_w(int _1){return __builtin_lasx_xvreplgr2vr_w(_1);} +-v4i64 __lasx_xvreplgr2vr_d(int _1){return __builtin_lasx_xvreplgr2vr_d(_1);} +-v32i8 __lasx_xvpcnt_b(v32i8 _1){return __builtin_lasx_xvpcnt_b(_1);} +-v16i16 __lasx_xvpcnt_h(v16i16 _1){return __builtin_lasx_xvpcnt_h(_1);} +-v8i32 __lasx_xvpcnt_w(v8i32 _1){return __builtin_lasx_xvpcnt_w(_1);} +-v4i64 __lasx_xvpcnt_d(v4i64 _1){return __builtin_lasx_xvpcnt_d(_1);} +-v32i8 __lasx_xvclo_b(v32i8 _1){return __builtin_lasx_xvclo_b(_1);} +-v16i16 __lasx_xvclo_h(v16i16 _1){return __builtin_lasx_xvclo_h(_1);} +-v8i32 __lasx_xvclo_w(v8i32 _1){return __builtin_lasx_xvclo_w(_1);} +-v4i64 __lasx_xvclo_d(v4i64 _1){return __builtin_lasx_xvclo_d(_1);} +-v32i8 __lasx_xvclz_b(v32i8 _1){return __builtin_lasx_xvclz_b(_1);} +-v16i16 __lasx_xvclz_h(v16i16 _1){return __builtin_lasx_xvclz_h(_1);} +-v8i32 __lasx_xvclz_w(v8i32 _1){return __builtin_lasx_xvclz_w(_1);} +-v4i64 __lasx_xvclz_d(v4i64 _1){return __builtin_lasx_xvclz_d(_1);} +-v8f32 __lasx_xvfadd_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfadd_s(_1, _2);} +-v4f64 __lasx_xvfadd_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfadd_d(_1, _2);} +-v8f32 __lasx_xvfsub_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfsub_s(_1, _2);} +-v4f64 __lasx_xvfsub_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfsub_d(_1, _2);} +-v8f32 __lasx_xvfmul_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmul_s(_1, _2);} +-v4f64 __lasx_xvfmul_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmul_d(_1, _2);} +-v8f32 __lasx_xvfdiv_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfdiv_s(_1, _2);} +-v4f64 __lasx_xvfdiv_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfdiv_d(_1, _2);} +-v16i16 __lasx_xvfcvt_h_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcvt_h_s(_1, _2);} +-v8f32 __lasx_xvfcvt_s_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcvt_s_d(_1, _2);} +-v8f32 __lasx_xvfmin_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmin_s(_1, _2);} +-v4f64 __lasx_xvfmin_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmin_d(_1, _2);} +-v8f32 __lasx_xvfmina_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmina_s(_1, _2);} +-v4f64 __lasx_xvfmina_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmina_d(_1, _2);} +-v8f32 __lasx_xvfmax_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmax_s(_1, _2);} +-v4f64 __lasx_xvfmax_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmax_d(_1, _2);} +-v8f32 __lasx_xvfmaxa_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfmaxa_s(_1, _2);} +-v4f64 __lasx_xvfmaxa_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfmaxa_d(_1, 
_2);} +-v8i32 __lasx_xvfclass_s(v8f32 _1){return __builtin_lasx_xvfclass_s(_1);} +-v4i64 __lasx_xvfclass_d(v4f64 _1){return __builtin_lasx_xvfclass_d(_1);} +-v8f32 __lasx_xvfsqrt_s(v8f32 _1){return __builtin_lasx_xvfsqrt_s(_1);} +-v4f64 __lasx_xvfsqrt_d(v4f64 _1){return __builtin_lasx_xvfsqrt_d(_1);} +-v8f32 __lasx_xvfrecip_s(v8f32 _1){return __builtin_lasx_xvfrecip_s(_1);} +-v4f64 __lasx_xvfrecip_d(v4f64 _1){return __builtin_lasx_xvfrecip_d(_1);} +-v8f32 __lasx_xvfrint_s(v8f32 _1){return __builtin_lasx_xvfrint_s(_1);} +-v4f64 __lasx_xvfrint_d(v4f64 _1){return __builtin_lasx_xvfrint_d(_1);} +-v8f32 __lasx_xvfrsqrt_s(v8f32 _1){return __builtin_lasx_xvfrsqrt_s(_1);} +-v4f64 __lasx_xvfrsqrt_d(v4f64 _1){return __builtin_lasx_xvfrsqrt_d(_1);} +-v8f32 __lasx_xvflogb_s(v8f32 _1){return __builtin_lasx_xvflogb_s(_1);} +-v4f64 __lasx_xvflogb_d(v4f64 _1){return __builtin_lasx_xvflogb_d(_1);} +-v8f32 __lasx_xvfcvth_s_h(v16i16 _1){return __builtin_lasx_xvfcvth_s_h(_1);} +-v4f64 __lasx_xvfcvth_d_s(v8f32 _1){return __builtin_lasx_xvfcvth_d_s(_1);} +-v8f32 __lasx_xvfcvtl_s_h(v16i16 _1){return __builtin_lasx_xvfcvtl_s_h(_1);} +-v4f64 __lasx_xvfcvtl_d_s(v8f32 _1){return __builtin_lasx_xvfcvtl_d_s(_1);} +-v8i32 __lasx_xvftint_w_s(v8f32 _1){return __builtin_lasx_xvftint_w_s(_1);} +-v4i64 __lasx_xvftint_l_d(v4f64 _1){return __builtin_lasx_xvftint_l_d(_1);} +-v8u32 __lasx_xvftint_wu_s(v8f32 _1){return __builtin_lasx_xvftint_wu_s(_1);} +-v4u64 __lasx_xvftint_lu_d(v4f64 _1){return __builtin_lasx_xvftint_lu_d(_1);} +-v8i32 __lasx_xvftintrz_w_s(v8f32 _1){return __builtin_lasx_xvftintrz_w_s(_1);} +-v4i64 __lasx_xvftintrz_l_d(v4f64 _1){return __builtin_lasx_xvftintrz_l_d(_1);} +-v8u32 __lasx_xvftintrz_wu_s(v8f32 _1){return __builtin_lasx_xvftintrz_wu_s(_1);} +-v4u64 __lasx_xvftintrz_lu_d(v4f64 _1){return __builtin_lasx_xvftintrz_lu_d(_1);} +-v8f32 __lasx_xvffint_s_w(v8i32 _1){return __builtin_lasx_xvffint_s_w(_1);} +-v4f64 __lasx_xvffint_d_l(v4i64 _1){return __builtin_lasx_xvffint_d_l(_1);} +-v8f32 __lasx_xvffint_s_wu(v8u32 _1){return __builtin_lasx_xvffint_s_wu(_1);} +-v4f64 __lasx_xvffint_d_lu(v4u64 _1){return __builtin_lasx_xvffint_d_lu(_1);} +-v32i8 __lasx_xvreplve_b(v32i8 _1, int _2){return __builtin_lasx_xvreplve_b(_1, _2);} +-v16i16 __lasx_xvreplve_h(v16i16 _1, int _2){return __builtin_lasx_xvreplve_h(_1, _2);} +-v8i32 __lasx_xvreplve_w(v8i32 _1, int _2){return __builtin_lasx_xvreplve_w(_1, _2);} +-v4i64 __lasx_xvreplve_d(v4i64 _1, int _2){return __builtin_lasx_xvreplve_d(_1, _2);} +-v8i32 __lasx_xvpermi_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvpermi_w(_1, _2, 1);} +-v32u8 __lasx_xvandn_v(v32u8 _1, v32u8 _2){return __builtin_lasx_xvandn_v(_1, _2);} +-v32i8 __lasx_xvneg_b(v32i8 _1){return __builtin_lasx_xvneg_b(_1);} +-v16i16 __lasx_xvneg_h(v16i16 _1){return __builtin_lasx_xvneg_h(_1);} +-v8i32 __lasx_xvneg_w(v8i32 _1){return __builtin_lasx_xvneg_w(_1);} +-v4i64 __lasx_xvneg_d(v4i64 _1){return __builtin_lasx_xvneg_d(_1);} +-v32i8 __lasx_xvmuh_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmuh_b(_1, _2);} +-v16i16 __lasx_xvmuh_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmuh_h(_1, _2);} +-v8i32 __lasx_xvmuh_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmuh_w(_1, _2);} +-v4i64 __lasx_xvmuh_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmuh_d(_1, _2);} +-v32u8 __lasx_xvmuh_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmuh_bu(_1, _2);} +-v16u16 __lasx_xvmuh_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmuh_hu(_1, _2);} +-v8u32 __lasx_xvmuh_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmuh_wu(_1, _2);} 
+-v4u64 __lasx_xvmuh_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmuh_du(_1, _2);} +-v16i16 __lasx_xvsllwil_h_b(v32i8 _1){return __builtin_lasx_xvsllwil_h_b(_1, 1);} +-v8i32 __lasx_xvsllwil_w_h(v16i16 _1){return __builtin_lasx_xvsllwil_w_h(_1, 1);} +-v4i64 __lasx_xvsllwil_d_w(v8i32 _1){return __builtin_lasx_xvsllwil_d_w(_1, 1);} +-v16u16 __lasx_xvsllwil_hu_bu(v32u8 _1){return __builtin_lasx_xvsllwil_hu_bu(_1, 1);} +-v8u32 __lasx_xvsllwil_wu_hu(v16u16 _1){return __builtin_lasx_xvsllwil_wu_hu(_1, 1);} +-v4u64 __lasx_xvsllwil_du_wu(v8u32 _1){return __builtin_lasx_xvsllwil_du_wu(_1, 1);} +-v32i8 __lasx_xvsran_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsran_b_h(_1, _2);} +-v16i16 __lasx_xvsran_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsran_h_w(_1, _2);} +-v8i32 __lasx_xvsran_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsran_w_d(_1, _2);} +-v32i8 __lasx_xvssran_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssran_b_h(_1, _2);} +-v16i16 __lasx_xvssran_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssran_h_w(_1, _2);} +-v8i32 __lasx_xvssran_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssran_w_d(_1, _2);} +-v32u8 __lasx_xvssran_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssran_bu_h(_1, _2);} +-v16u16 __lasx_xvssran_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssran_hu_w(_1, _2);} +-v8u32 __lasx_xvssran_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssran_wu_d(_1, _2);} +-v32i8 __lasx_xvsrarn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrarn_b_h(_1, _2);} +-v16i16 __lasx_xvsrarn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrarn_h_w(_1, _2);} +-v8i32 __lasx_xvsrarn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrarn_w_d(_1, _2);} +-v32i8 __lasx_xvssrarn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrarn_b_h(_1, _2);} +-v16i16 __lasx_xvssrarn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrarn_h_w(_1, _2);} +-v8i32 __lasx_xvssrarn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrarn_w_d(_1, _2);} +-v32u8 __lasx_xvssrarn_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrarn_bu_h(_1, _2);} +-v16u16 __lasx_xvssrarn_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrarn_hu_w(_1, _2);} +-v8u32 __lasx_xvssrarn_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrarn_wu_d(_1, _2);} +-v32i8 __lasx_xvsrln_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrln_b_h(_1, _2);} +-v16i16 __lasx_xvsrln_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrln_h_w(_1, _2);} +-v8i32 __lasx_xvsrln_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrln_w_d(_1, _2);} +-v32u8 __lasx_xvssrln_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrln_bu_h(_1, _2);} +-v16u16 __lasx_xvssrln_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrln_hu_w(_1, _2);} +-v8u32 __lasx_xvssrln_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrln_wu_d(_1, _2);} +-v32i8 __lasx_xvsrlrn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlrn_b_h(_1, _2);} +-v16i16 __lasx_xvsrlrn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlrn_h_w(_1, _2);} +-v8i32 __lasx_xvsrlrn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlrn_w_d(_1, _2);} +-v32u8 __lasx_xvssrlrn_bu_h(v16u16 _1, v16u16 _2){return __builtin_lasx_xvssrlrn_bu_h(_1, _2);} +-v16u16 __lasx_xvssrlrn_hu_w(v8u32 _1, v8u32 _2){return __builtin_lasx_xvssrlrn_hu_w(_1, _2);} +-v8u32 __lasx_xvssrlrn_wu_d(v4u64 _1, v4u64 _2){return __builtin_lasx_xvssrlrn_wu_d(_1, _2);} +-v32i8 __lasx_xvfrstpi_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvfrstpi_b(_1, _2, 1);} +-v16i16 __lasx_xvfrstpi_h(v16i16 _1, v16i16 _2){return 
__builtin_lasx_xvfrstpi_h(_1, _2, 1);} +-v32i8 __lasx_xvfrstp_b(v32i8 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvfrstp_b(_1, _2, _3);} +-v16i16 __lasx_xvfrstp_h(v16i16 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvfrstp_h(_1, _2, _3);} +-v4i64 __lasx_xvshuf4i_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvshuf4i_d(_1, _2, 1);} +-v32i8 __lasx_xvbsrl_v(v32i8 _1){return __builtin_lasx_xvbsrl_v(_1, 1);} +-v32i8 __lasx_xvbsll_v(v32i8 _1){return __builtin_lasx_xvbsll_v(_1, 1);} +-v32i8 __lasx_xvextrins_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvextrins_b(_1, _2, 1);} +-v16i16 __lasx_xvextrins_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvextrins_h(_1, _2, 1);} +-v8i32 __lasx_xvextrins_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvextrins_w(_1, _2, 1);} +-v4i64 __lasx_xvextrins_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvextrins_d(_1, _2, 1);} +-v32i8 __lasx_xvmskltz_b(v32i8 _1){return __builtin_lasx_xvmskltz_b(_1);} +-v16i16 __lasx_xvmskltz_h(v16i16 _1){return __builtin_lasx_xvmskltz_h(_1);} +-v8i32 __lasx_xvmskltz_w(v8i32 _1){return __builtin_lasx_xvmskltz_w(_1);} +-v4i64 __lasx_xvmskltz_d(v4i64 _1){return __builtin_lasx_xvmskltz_d(_1);} +-v32i8 __lasx_xvsigncov_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsigncov_b(_1, _2);} +-v16i16 __lasx_xvsigncov_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsigncov_h(_1, _2);} +-v8i32 __lasx_xvsigncov_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsigncov_w(_1, _2);} +-v4i64 __lasx_xvsigncov_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsigncov_d(_1, _2);} +-v8f32 __lasx_xvfmadd_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfmadd_s(_1, _2, _3);} +-v4f64 __lasx_xvfmadd_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfmadd_d(_1, _2, _3);} +-v8f32 __lasx_xvfmsub_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfmsub_s(_1, _2, _3);} +-v4f64 __lasx_xvfmsub_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfmsub_d(_1, _2, _3);} +-v8f32 __lasx_xvfnmadd_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfnmadd_s(_1, _2, _3);} +-v4f64 __lasx_xvfnmadd_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfnmadd_d(_1, _2, _3);} +-v8f32 __lasx_xvfnmsub_s(v8f32 _1, v8f32 _2, v8f32 _3){return __builtin_lasx_xvfnmsub_s(_1, _2, _3);} +-v4f64 __lasx_xvfnmsub_d(v4f64 _1, v4f64 _2, v4f64 _3){return __builtin_lasx_xvfnmsub_d(_1, _2, _3);} +-v8i32 __lasx_xvftintrne_w_s(v8f32 _1){return __builtin_lasx_xvftintrne_w_s(_1);} +-v4i64 __lasx_xvftintrne_l_d(v4f64 _1){return __builtin_lasx_xvftintrne_l_d(_1);} +-v8i32 __lasx_xvftintrp_w_s(v8f32 _1){return __builtin_lasx_xvftintrp_w_s(_1);} +-v4i64 __lasx_xvftintrp_l_d(v4f64 _1){return __builtin_lasx_xvftintrp_l_d(_1);} +-v8i32 __lasx_xvftintrm_w_s(v8f32 _1){return __builtin_lasx_xvftintrm_w_s(_1);} +-v4i64 __lasx_xvftintrm_l_d(v4f64 _1){return __builtin_lasx_xvftintrm_l_d(_1);} +-v8i32 __lasx_xvftint_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftint_w_d(_1, _2);} +-v8f32 __lasx_xvffint_s_l(v4i64 _1, v4i64 _2){return __builtin_lasx_xvffint_s_l(_1, _2);} +-v8i32 __lasx_xvftintrz_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrz_w_d(_1, _2);} +-v8i32 __lasx_xvftintrp_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrp_w_d(_1, _2);} +-v8i32 __lasx_xvftintrm_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrm_w_d(_1, _2);} +-v8i32 __lasx_xvftintrne_w_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvftintrne_w_d(_1, _2);} +-v4i64 __lasx_xvftinth_l_s(v8f32 _1){return __builtin_lasx_xvftinth_l_s(_1);} +-v4i64 __lasx_xvftintl_l_s(v8f32 _1){return 
__builtin_lasx_xvftintl_l_s(_1);} +-v4f64 __lasx_xvffinth_d_w(v8i32 _1){return __builtin_lasx_xvffinth_d_w(_1);} +-v4f64 __lasx_xvffintl_d_w(v8i32 _1){return __builtin_lasx_xvffintl_d_w(_1);} +-v4i64 __lasx_xvftintrzh_l_s(v8f32 _1){return __builtin_lasx_xvftintrzh_l_s(_1);} +-v4i64 __lasx_xvftintrzl_l_s(v8f32 _1){return __builtin_lasx_xvftintrzl_l_s(_1);} +-v4i64 __lasx_xvftintrph_l_s(v8f32 _1){return __builtin_lasx_xvftintrph_l_s(_1);} +-v4i64 __lasx_xvftintrpl_l_s(v8f32 _1){return __builtin_lasx_xvftintrpl_l_s(_1);} +-v4i64 __lasx_xvftintrmh_l_s(v8f32 _1){return __builtin_lasx_xvftintrmh_l_s(_1);} +-v4i64 __lasx_xvftintrml_l_s(v8f32 _1){return __builtin_lasx_xvftintrml_l_s(_1);} +-v4i64 __lasx_xvftintrneh_l_s(v8f32 _1){return __builtin_lasx_xvftintrneh_l_s(_1);} +-v4i64 __lasx_xvftintrnel_l_s(v8f32 _1){return __builtin_lasx_xvftintrnel_l_s(_1);} +-v8i32 __lasx_xvfrintrne_s(v8f32 _1){return __builtin_lasx_xvfrintrne_s(_1);} +-v4i64 __lasx_xvfrintrne_d(v4f64 _1){return __builtin_lasx_xvfrintrne_d(_1);} +-v8i32 __lasx_xvfrintrz_s(v8f32 _1){return __builtin_lasx_xvfrintrz_s(_1);} +-v4i64 __lasx_xvfrintrz_d(v4f64 _1){return __builtin_lasx_xvfrintrz_d(_1);} +-v8i32 __lasx_xvfrintrp_s(v8f32 _1){return __builtin_lasx_xvfrintrp_s(_1);} +-v4i64 __lasx_xvfrintrp_d(v4f64 _1){return __builtin_lasx_xvfrintrp_d(_1);} +-v8i32 __lasx_xvfrintrm_s(v8f32 _1){return __builtin_lasx_xvfrintrm_s(_1);} +-v4i64 __lasx_xvfrintrm_d(v4f64 _1){return __builtin_lasx_xvfrintrm_d(_1);} +-v32i8 __lasx_xvld(void * _1){return __builtin_lasx_xvld(_1, 1);} +-void __lasx_xvst(v32i8 _1, void * _2){return __builtin_lasx_xvst(_1, _2, 1);} +-void __lasx_xvstelm_b(v32i8 _1, void * _2){return __builtin_lasx_xvstelm_b(_1, _2, 1, 1);} +-void __lasx_xvstelm_h(v16i16 _1, void * _2){return __builtin_lasx_xvstelm_h(_1, _2, 2, 1);} +-void __lasx_xvstelm_w(v8i32 _1, void * _2){return __builtin_lasx_xvstelm_w(_1, _2, 4, 1);} +-void __lasx_xvstelm_d(v4i64 _1, void * _2){return __builtin_lasx_xvstelm_d(_1, _2, 8, 1);} +-v8i32 __lasx_xvinsve0_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvinsve0_w(_1, _2, 1);} +-v4i64 __lasx_xvinsve0_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvinsve0_d(_1, _2, 1);} +-v8i32 __lasx_xvpickve_w(v8i32 _1){return __builtin_lasx_xvpickve_w(_1, 1);} +-v4i64 __lasx_xvpickve_d(v4i64 _1){return __builtin_lasx_xvpickve_d(_1, 1);} +-v32i8 __lasx_xvssrlrn_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrlrn_b_h(_1, _2);} +-v16i16 __lasx_xvssrlrn_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlrn_h_w(_1, _2);} +-v8i32 __lasx_xvssrlrn_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrlrn_w_d(_1, _2);} +-v32i8 __lasx_xvssrln_b_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrln_b_h(_1, _2);} +-v16i16 __lasx_xvssrln_h_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrln_h_w(_1, _2);} +-v8i32 __lasx_xvssrln_w_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrln_w_d(_1, _2);} +-v32i8 __lasx_xvorn_v(v32i8 _1, v32i8 _2){return __builtin_lasx_xvorn_v(_1, _2);} +-v4i64 __lasx_xvldi(){return __builtin_lasx_xvldi(1);} +-v32i8 __lasx_xvldx(void * _1){return __builtin_lasx_xvldx(_1, 1);} +-void __lasx_xvstx(v32i8 _1, void * _2){return __builtin_lasx_xvstx(_1, _2, 1);} +-v4u64 __lasx_xvextl_qu_du(v4u64 _1){return __builtin_lasx_xvextl_qu_du(_1);} +-v8i32 __lasx_xvinsgr2vr_w(v8i32 _1){return __builtin_lasx_xvinsgr2vr_w(_1, 1, 1);} +-v4i64 __lasx_xvinsgr2vr_d(v4i64 _1){return __builtin_lasx_xvinsgr2vr_d(_1, 1, 1);} +-v32i8 __lasx_xvreplve0_b(v32i8 _1){return __builtin_lasx_xvreplve0_b(_1);} +-v16i16 __lasx_xvreplve0_h(v16i16 
_1){return __builtin_lasx_xvreplve0_h(_1);} +-v8i32 __lasx_xvreplve0_w(v8i32 _1){return __builtin_lasx_xvreplve0_w(_1);} +-v4i64 __lasx_xvreplve0_d(v4i64 _1){return __builtin_lasx_xvreplve0_d(_1);} +-v32i8 __lasx_xvreplve0_q(v32i8 _1){return __builtin_lasx_xvreplve0_q(_1);} +-v16i16 __lasx_vext2xv_h_b(v32i8 _1){return __builtin_lasx_vext2xv_h_b(_1);} +-v8i32 __lasx_vext2xv_w_h(v16i16 _1){return __builtin_lasx_vext2xv_w_h(_1);} +-v4i64 __lasx_vext2xv_d_w(v8i32 _1){return __builtin_lasx_vext2xv_d_w(_1);} +-v8i32 __lasx_vext2xv_w_b(v32i8 _1){return __builtin_lasx_vext2xv_w_b(_1);} +-v4i64 __lasx_vext2xv_d_h(v16i16 _1){return __builtin_lasx_vext2xv_d_h(_1);} +-v4i64 __lasx_vext2xv_d_b(v32i8 _1){return __builtin_lasx_vext2xv_d_b(_1);} +-v16i16 __lasx_vext2xv_hu_bu(v32i8 _1){return __builtin_lasx_vext2xv_hu_bu(_1);} +-v8i32 __lasx_vext2xv_wu_hu(v16i16 _1){return __builtin_lasx_vext2xv_wu_hu(_1);} +-v4i64 __lasx_vext2xv_du_wu(v8i32 _1){return __builtin_lasx_vext2xv_du_wu(_1);} +-v8i32 __lasx_vext2xv_wu_bu(v32i8 _1){return __builtin_lasx_vext2xv_wu_bu(_1);} +-v4i64 __lasx_vext2xv_du_hu(v16i16 _1){return __builtin_lasx_vext2xv_du_hu(_1);} +-v4i64 __lasx_vext2xv_du_bu(v32i8 _1){return __builtin_lasx_vext2xv_du_bu(_1);} +-v32i8 __lasx_xvpermi_q(v32i8 _1, v32i8 _2){return __builtin_lasx_xvpermi_q(_1, _2, 1);} +-v4i64 __lasx_xvpermi_d(v4i64 _1){return __builtin_lasx_xvpermi_d(_1, 1);} +-v8i32 __lasx_xvperm_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvperm_w(_1, _2);} +-v32i8 __lasx_xvldrepl_b(void * _1){return __builtin_lasx_xvldrepl_b(_1, 1);} +-v16i16 __lasx_xvldrepl_h(void * _1){return __builtin_lasx_xvldrepl_h(_1, 2);} +-v8i32 __lasx_xvldrepl_w(void * _1){return __builtin_lasx_xvldrepl_w(_1, 4);} +-v4i64 __lasx_xvldrepl_d(void * _1){return __builtin_lasx_xvldrepl_d(_1, 8);} +-int __lasx_xvpickve2gr_w(v8i32 _1){return __builtin_lasx_xvpickve2gr_w(_1, 1);} +-unsigned int __lasx_xvpickve2gr_wu(v8i32 _1){return __builtin_lasx_xvpickve2gr_wu(_1, 1);} +-long __lasx_xvpickve2gr_d(v4i64 _1){return __builtin_lasx_xvpickve2gr_d(_1, 1);} +-unsigned long int __lasx_xvpickve2gr_du(v4i64 _1){return __builtin_lasx_xvpickve2gr_du(_1, 1);} +-v4i64 __lasx_xvaddwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvaddwev_q_d(_1, _2);} +-v4i64 __lasx_xvaddwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvaddwev_d_w(_1, _2);} +-v8i32 __lasx_xvaddwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvaddwev_w_h(_1, _2);} +-v16i16 __lasx_xvaddwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvaddwev_h_b(_1, _2);} +-v4i64 __lasx_xvaddwev_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvaddwev_q_du(_1, _2);} +-v4i64 __lasx_xvaddwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvaddwev_d_wu(_1, _2);} +-v8i32 __lasx_xvaddwev_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvaddwev_w_hu(_1, _2);} +-v16i16 __lasx_xvaddwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvaddwev_h_bu(_1, _2);} +-v4i64 __lasx_xvsubwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsubwev_q_d(_1, _2);} +-v4i64 __lasx_xvsubwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsubwev_d_w(_1, _2);} +-v8i32 __lasx_xvsubwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsubwev_w_h(_1, _2);} +-v16i16 __lasx_xvsubwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsubwev_h_b(_1, _2);} +-v4i64 __lasx_xvsubwev_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsubwev_q_du(_1, _2);} +-v4i64 __lasx_xvsubwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsubwev_d_wu(_1, _2);} +-v8i32 __lasx_xvsubwev_w_hu(v16u16 _1, v16u16 _2){return 
__builtin_lasx_xvsubwev_w_hu(_1, _2);} +-v16i16 __lasx_xvsubwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsubwev_h_bu(_1, _2);} +-v4i64 __lasx_xvmulwev_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmulwev_q_d(_1, _2);} +-v4i64 __lasx_xvmulwev_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmulwev_d_w(_1, _2);} +-v8i32 __lasx_xvmulwev_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmulwev_w_h(_1, _2);} +-v16i16 __lasx_xvmulwev_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmulwev_h_b(_1, _2);} +-v4i64 __lasx_xvmulwev_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmulwev_q_du(_1, _2);} +-v4i64 __lasx_xvmulwev_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmulwev_d_wu(_1, _2);} +-v8i32 __lasx_xvmulwev_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmulwev_w_hu(_1, _2);} +-v16i16 __lasx_xvmulwev_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmulwev_h_bu(_1, _2);} +-v4i64 __lasx_xvaddwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvaddwod_q_d(_1, _2);} +-v4i64 __lasx_xvaddwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvaddwod_d_w(_1, _2);} +-v8i32 __lasx_xvaddwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvaddwod_w_h(_1, _2);} +-v16i16 __lasx_xvaddwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvaddwod_h_b(_1, _2);} +-v4i64 __lasx_xvaddwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvaddwod_q_du(_1, _2);} +-v4i64 __lasx_xvaddwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvaddwod_d_wu(_1, _2);} +-v8i32 __lasx_xvaddwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvaddwod_w_hu(_1, _2);} +-v16i16 __lasx_xvaddwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvaddwod_h_bu(_1, _2);} +-v4i64 __lasx_xvsubwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsubwod_q_d(_1, _2);} +-v4i64 __lasx_xvsubwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsubwod_d_w(_1, _2);} +-v8i32 __lasx_xvsubwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsubwod_w_h(_1, _2);} +-v16i16 __lasx_xvsubwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsubwod_h_b(_1, _2);} +-v4i64 __lasx_xvsubwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvsubwod_q_du(_1, _2);} +-v4i64 __lasx_xvsubwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvsubwod_d_wu(_1, _2);} +-v8i32 __lasx_xvsubwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvsubwod_w_hu(_1, _2);} +-v16i16 __lasx_xvsubwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvsubwod_h_bu(_1, _2);} +-v4i64 __lasx_xvmulwod_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvmulwod_q_d(_1, _2);} +-v4i64 __lasx_xvmulwod_d_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvmulwod_d_w(_1, _2);} +-v8i32 __lasx_xvmulwod_w_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvmulwod_w_h(_1, _2);} +-v16i16 __lasx_xvmulwod_h_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvmulwod_h_b(_1, _2);} +-v4i64 __lasx_xvmulwod_q_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvmulwod_q_du(_1, _2);} +-v4i64 __lasx_xvmulwod_d_wu(v8u32 _1, v8u32 _2){return __builtin_lasx_xvmulwod_d_wu(_1, _2);} +-v8i32 __lasx_xvmulwod_w_hu(v16u16 _1, v16u16 _2){return __builtin_lasx_xvmulwod_w_hu(_1, _2);} +-v16i16 __lasx_xvmulwod_h_bu(v32u8 _1, v32u8 _2){return __builtin_lasx_xvmulwod_h_bu(_1, _2);} +-v4i64 __lasx_xvaddwev_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvaddwev_d_wu_w(_1, _2);} +-v8i32 __lasx_xvaddwev_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvaddwev_w_hu_h(_1, _2);} +-v16i16 __lasx_xvaddwev_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvaddwev_h_bu_b(_1, _2);} +-v4i64 __lasx_xvmulwev_d_wu_w(v8u32 _1, v8i32 _2){return 
__builtin_lasx_xvmulwev_d_wu_w(_1, _2);} +-v8i32 __lasx_xvmulwev_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvmulwev_w_hu_h(_1, _2);} +-v16i16 __lasx_xvmulwev_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvmulwev_h_bu_b(_1, _2);} +-v4i64 __lasx_xvaddwod_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvaddwod_d_wu_w(_1, _2);} +-v8i32 __lasx_xvaddwod_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvaddwod_w_hu_h(_1, _2);} +-v16i16 __lasx_xvaddwod_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvaddwod_h_bu_b(_1, _2);} +-v4i64 __lasx_xvmulwod_d_wu_w(v8u32 _1, v8i32 _2){return __builtin_lasx_xvmulwod_d_wu_w(_1, _2);} +-v8i32 __lasx_xvmulwod_w_hu_h(v16u16 _1, v16i16 _2){return __builtin_lasx_xvmulwod_w_hu_h(_1, _2);} +-v16i16 __lasx_xvmulwod_h_bu_b(v32u8 _1, v32i8 _2){return __builtin_lasx_xvmulwod_h_bu_b(_1, _2);} +-v4i64 __lasx_xvhaddw_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvhaddw_q_d(_1, _2);} +-v4u64 __lasx_xvhaddw_qu_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvhaddw_qu_du(_1, _2);} +-v4i64 __lasx_xvhsubw_q_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvhsubw_q_d(_1, _2);} +-v4u64 __lasx_xvhsubw_qu_du(v4u64 _1, v4u64 _2){return __builtin_lasx_xvhsubw_qu_du(_1, _2);} +-v4i64 __lasx_xvmaddwev_q_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmaddwev_q_d(_1, _2, _3);} +-v4i64 __lasx_xvmaddwev_d_w(v4i64 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmaddwev_d_w(_1, _2, _3);} +-v8i32 __lasx_xvmaddwev_w_h(v8i32 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmaddwev_w_h(_1, _2, _3);} +-v16i16 __lasx_xvmaddwev_h_b(v16i16 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmaddwev_h_b(_1, _2, _3);} +-v4u64 __lasx_xvmaddwev_q_du(v4u64 _1, v4u64 _2, v4u64 _3){return __builtin_lasx_xvmaddwev_q_du(_1, _2, _3);} +-v4u64 __lasx_xvmaddwev_d_wu(v4u64 _1, v8u32 _2, v8u32 _3){return __builtin_lasx_xvmaddwev_d_wu(_1, _2, _3);} +-v8u32 __lasx_xvmaddwev_w_hu(v8u32 _1, v16u16 _2, v16u16 _3){return __builtin_lasx_xvmaddwev_w_hu(_1, _2, _3);} +-v16u16 __lasx_xvmaddwev_h_bu(v16u16 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvmaddwev_h_bu(_1, _2, _3);} +-v4i64 __lasx_xvmaddwod_q_d(v4i64 _1, v4i64 _2, v4i64 _3){return __builtin_lasx_xvmaddwod_q_d(_1, _2, _3);} +-v4i64 __lasx_xvmaddwod_d_w(v4i64 _1, v8i32 _2, v8i32 _3){return __builtin_lasx_xvmaddwod_d_w(_1, _2, _3);} +-v8i32 __lasx_xvmaddwod_w_h(v8i32 _1, v16i16 _2, v16i16 _3){return __builtin_lasx_xvmaddwod_w_h(_1, _2, _3);} +-v16i16 __lasx_xvmaddwod_h_b(v16i16 _1, v32i8 _2, v32i8 _3){return __builtin_lasx_xvmaddwod_h_b(_1, _2, _3);} +-v4u64 __lasx_xvmaddwod_q_du(v4u64 _1, v4u64 _2, v4u64 _3){return __builtin_lasx_xvmaddwod_q_du(_1, _2, _3);} +-v4u64 __lasx_xvmaddwod_d_wu(v4u64 _1, v8u32 _2, v8u32 _3){return __builtin_lasx_xvmaddwod_d_wu(_1, _2, _3);} +-v8u32 __lasx_xvmaddwod_w_hu(v8u32 _1, v16u16 _2, v16u16 _3){return __builtin_lasx_xvmaddwod_w_hu(_1, _2, _3);} +-v16u16 __lasx_xvmaddwod_h_bu(v16u16 _1, v32u8 _2, v32u8 _3){return __builtin_lasx_xvmaddwod_h_bu(_1, _2, _3);} +-v4i64 __lasx_xvmaddwev_q_du_d(v4i64 _1, v4u64 _2, v4i64 _3){return __builtin_lasx_xvmaddwev_q_du_d(_1, _2, _3);} +-v4i64 __lasx_xvmaddwev_d_wu_w(v4i64 _1, v8u32 _2, v8i32 _3){return __builtin_lasx_xvmaddwev_d_wu_w(_1, _2, _3);} +-v8i32 __lasx_xvmaddwev_w_hu_h(v8i32 _1, v16u16 _2, v16i16 _3){return __builtin_lasx_xvmaddwev_w_hu_h(_1, _2, _3);} +-v16i16 __lasx_xvmaddwev_h_bu_b(v16i16 _1, v32u8 _2, v32i8 _3){return __builtin_lasx_xvmaddwev_h_bu_b(_1, _2, _3);} +-v4i64 __lasx_xvmaddwod_q_du_d(v4i64 _1, v4u64 _2, v4i64 _3){return 
__builtin_lasx_xvmaddwod_q_du_d(_1, _2, _3);} +-v4i64 __lasx_xvmaddwod_d_wu_w(v4i64 _1, v8u32 _2, v8i32 _3){return __builtin_lasx_xvmaddwod_d_wu_w(_1, _2, _3);} +-v8i32 __lasx_xvmaddwod_w_hu_h(v8i32 _1, v16u16 _2, v16i16 _3){return __builtin_lasx_xvmaddwod_w_hu_h(_1, _2, _3);} +-v16i16 __lasx_xvmaddwod_h_bu_b(v16i16 _1, v32u8 _2, v32i8 _3){return __builtin_lasx_xvmaddwod_h_bu_b(_1, _2, _3);} +-v32i8 __lasx_xvrotr_b(v32i8 _1, v32i8 _2){return __builtin_lasx_xvrotr_b(_1, _2);} +-v16i16 __lasx_xvrotr_h(v16i16 _1, v16i16 _2){return __builtin_lasx_xvrotr_h(_1, _2);} +-v8i32 __lasx_xvrotr_w(v8i32 _1, v8i32 _2){return __builtin_lasx_xvrotr_w(_1, _2);} +-v4i64 __lasx_xvrotr_d(v4i64 _1, v4i64 _2){return __builtin_lasx_xvrotr_d(_1, _2);} +-v4i64 __lasx_xvadd_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvadd_q(_1, _2);} +-v4i64 __lasx_xvsub_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsub_q(_1, _2);} +-v4i64 __lasx_xvaddwev_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvaddwev_q_du_d(_1, _2);} +-v4i64 __lasx_xvaddwod_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvaddwod_q_du_d(_1, _2);} +-v4i64 __lasx_xvmulwev_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvmulwev_q_du_d(_1, _2);} +-v4i64 __lasx_xvmulwod_q_du_d(v4u64 _1, v4i64 _2){return __builtin_lasx_xvmulwod_q_du_d(_1, _2);} +-v32i8 __lasx_xvmskgez_b(v32i8 _1){return __builtin_lasx_xvmskgez_b(_1);} +-v32i8 __lasx_xvmsknz_b(v32i8 _1){return __builtin_lasx_xvmsknz_b(_1);} +-v16i16 __lasx_xvexth_h_b(v32i8 _1){return __builtin_lasx_xvexth_h_b(_1);} +-v8i32 __lasx_xvexth_w_h(v16i16 _1){return __builtin_lasx_xvexth_w_h(_1);} +-v4i64 __lasx_xvexth_d_w(v8i32 _1){return __builtin_lasx_xvexth_d_w(_1);} +-v4i64 __lasx_xvexth_q_d(v4i64 _1){return __builtin_lasx_xvexth_q_d(_1);} +-v16u16 __lasx_xvexth_hu_bu(v32u8 _1){return __builtin_lasx_xvexth_hu_bu(_1);} +-v8u32 __lasx_xvexth_wu_hu(v16u16 _1){return __builtin_lasx_xvexth_wu_hu(_1);} +-v4u64 __lasx_xvexth_du_wu(v8u32 _1){return __builtin_lasx_xvexth_du_wu(_1);} +-v4u64 __lasx_xvexth_qu_du(v4u64 _1){return __builtin_lasx_xvexth_qu_du(_1);} +-v32i8 __lasx_xvrotri_b(v32i8 _1){return __builtin_lasx_xvrotri_b(_1, 1);} +-v16i16 __lasx_xvrotri_h(v16i16 _1){return __builtin_lasx_xvrotri_h(_1, 1);} +-v8i32 __lasx_xvrotri_w(v8i32 _1){return __builtin_lasx_xvrotri_w(_1, 1);} +-v4i64 __lasx_xvrotri_d(v4i64 _1){return __builtin_lasx_xvrotri_d(_1, 1);} +-v4i64 __lasx_xvextl_q_d(v4i64 _1){return __builtin_lasx_xvextl_q_d(_1);} +-v32i8 __lasx_xvsrlni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlni_b_h(_1, _2, 1);} +-v16i16 __lasx_xvsrlni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlni_h_w(_1, _2, 1);} +-v8i32 __lasx_xvsrlni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlni_w_d(_1, _2, 1);} +-v4i64 __lasx_xvsrlni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlni_d_q(_1, _2, 1);} +-v32i8 __lasx_xvsrlrni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrlrni_b_h(_1, _2, 1);} +-v16i16 __lasx_xvsrlrni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrlrni_h_w(_1, _2, 1);} +-v8i32 __lasx_xvsrlrni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrlrni_w_d(_1, _2, 1);} +-v4i64 __lasx_xvsrlrni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrlrni_d_q(_1, _2, 1);} +-v32i8 __lasx_xvssrlni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrlni_b_h(_1, _2, 1);} +-v16i16 __lasx_xvssrlni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrlni_h_w(_1, _2, 1);} +-v8i32 __lasx_xvssrlni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlni_w_d(_1, _2, 1);} +-v4i64 __lasx_xvssrlni_d_q(v4i64 _1, v4i64 
_2){return __builtin_lasx_xvssrlni_d_q(_1, _2, 1);} +-v32u8 __lasx_xvssrlni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrlni_bu_h(_1, _2, 1);} +-v16u16 __lasx_xvssrlni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrlni_hu_w(_1, _2, 1);} +-v8u32 __lasx_xvssrlni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrlni_wu_d(_1, _2, 1);} +-v4u64 __lasx_xvssrlni_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrlni_du_q(_1, _2, 1);} +-v32i8 __lasx_xvssrlrni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrlrni_b_h(_1, _2, 1);} +-v16i16 __lasx_xvssrlrni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrlrni_h_w(_1, _2, 1);} +-v8i32 __lasx_xvssrlrni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrlrni_w_d(_1, _2, 1);} +-v4i64 __lasx_xvssrlrni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrlrni_d_q(_1, _2, 1);} +-v32u8 __lasx_xvssrlrni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrlrni_bu_h(_1, _2, 1);} +-v16u16 __lasx_xvssrlrni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrlrni_hu_w(_1, _2, 1);} +-v8u32 __lasx_xvssrlrni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrlrni_wu_d(_1, _2, 1);} +-v4u64 __lasx_xvssrlrni_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrlrni_du_q(_1, _2, 1);} +-v32i8 __lasx_xvsrani_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrani_b_h(_1, _2, 1);} +-v16i16 __lasx_xvsrani_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrani_h_w(_1, _2, 1);} +-v8i32 __lasx_xvsrani_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrani_w_d(_1, _2, 1);} +-v4i64 __lasx_xvsrani_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrani_d_q(_1, _2, 1);} +-v32i8 __lasx_xvsrarni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvsrarni_b_h(_1, _2, 1);} +-v16i16 __lasx_xvsrarni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvsrarni_h_w(_1, _2, 1);} +-v8i32 __lasx_xvsrarni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvsrarni_w_d(_1, _2, 1);} +-v4i64 __lasx_xvsrarni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvsrarni_d_q(_1, _2, 1);} +-v32i8 __lasx_xvssrani_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrani_b_h(_1, _2, 1);} +-v16i16 __lasx_xvssrani_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrani_h_w(_1, _2, 1);} +-v8i32 __lasx_xvssrani_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrani_w_d(_1, _2, 1);} +-v4i64 __lasx_xvssrani_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrani_d_q(_1, _2, 1);} +-v32u8 __lasx_xvssrani_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrani_bu_h(_1, _2, 1);} +-v16u16 __lasx_xvssrani_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrani_hu_w(_1, _2, 1);} +-v8u32 __lasx_xvssrani_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrani_wu_d(_1, _2, 1);} +-v4u64 __lasx_xvssrani_du_q(v4u64 _1, v4i64 _2){return __builtin_lasx_xvssrani_du_q(_1, _2, 1);} +-v32i8 __lasx_xvssrarni_b_h(v32i8 _1, v32i8 _2){return __builtin_lasx_xvssrarni_b_h(_1, _2, 1);} +-v16i16 __lasx_xvssrarni_h_w(v16i16 _1, v16i16 _2){return __builtin_lasx_xvssrarni_h_w(_1, _2, 1);} +-v8i32 __lasx_xvssrarni_w_d(v8i32 _1, v8i32 _2){return __builtin_lasx_xvssrarni_w_d(_1, _2, 1);} +-v4i64 __lasx_xvssrarni_d_q(v4i64 _1, v4i64 _2){return __builtin_lasx_xvssrarni_d_q(_1, _2, 1);} +-v32u8 __lasx_xvssrarni_bu_h(v32u8 _1, v32i8 _2){return __builtin_lasx_xvssrarni_bu_h(_1, _2, 1);} +-v16u16 __lasx_xvssrarni_hu_w(v16u16 _1, v16i16 _2){return __builtin_lasx_xvssrarni_hu_w(_1, _2, 1);} +-v8u32 __lasx_xvssrarni_wu_d(v8u32 _1, v8i32 _2){return __builtin_lasx_xvssrarni_wu_d(_1, _2, 1);} +-v4u64 __lasx_xvssrarni_du_q(v4u64 _1, v4i64 _2){return 
__builtin_lasx_xvssrarni_du_q(_1, _2, 1);} +-int __lasx_xbnz_b(v32u8 _1){return __builtin_lasx_xbnz_b(_1);} +-int __lasx_xbnz_d(v4u64 _1){return __builtin_lasx_xbnz_d(_1);} +-int __lasx_xbnz_h(v16u16 _1){return __builtin_lasx_xbnz_h(_1);} +-int __lasx_xbnz_v(v32u8 _1){return __builtin_lasx_xbnz_v(_1);} +-int __lasx_xbnz_w(v8u32 _1){return __builtin_lasx_xbnz_w(_1);} +-int __lasx_xbz_b(v32u8 _1){return __builtin_lasx_xbz_b(_1);} +-int __lasx_xbz_d(v4u64 _1){return __builtin_lasx_xbz_d(_1);} +-int __lasx_xbz_h(v16u16 _1){return __builtin_lasx_xbz_h(_1);} +-int __lasx_xbz_v(v32u8 _1){return __builtin_lasx_xbz_v(_1);} +-int __lasx_xbz_w(v8u32 _1){return __builtin_lasx_xbz_w(_1);} +-v4i64 __lasx_xvfcmp_caf_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_caf_d(_1, _2);} +-v8i32 __lasx_xvfcmp_caf_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_caf_s(_1, _2);} +-v4i64 __lasx_xvfcmp_ceq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_ceq_d(_1, _2);} +-v8i32 __lasx_xvfcmp_ceq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_ceq_s(_1, _2);} +-v4i64 __lasx_xvfcmp_cle_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cle_d(_1, _2);} +-v8i32 __lasx_xvfcmp_cle_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cle_s(_1, _2);} +-v4i64 __lasx_xvfcmp_clt_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_clt_d(_1, _2);} +-v8i32 __lasx_xvfcmp_clt_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_clt_s(_1, _2);} +-v4i64 __lasx_xvfcmp_cne_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cne_d(_1, _2);} +-v8i32 __lasx_xvfcmp_cne_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cne_s(_1, _2);} +-v4i64 __lasx_xvfcmp_cor_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cor_d(_1, _2);} +-v8i32 __lasx_xvfcmp_cor_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cor_s(_1, _2);} +-v4i64 __lasx_xvfcmp_cueq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cueq_d(_1, _2);} +-v8i32 __lasx_xvfcmp_cueq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cueq_s(_1, _2);} +-v4i64 __lasx_xvfcmp_cule_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cule_d(_1, _2);} +-v8i32 __lasx_xvfcmp_cule_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cule_s(_1, _2);} +-v4i64 __lasx_xvfcmp_cult_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cult_d(_1, _2);} +-v8i32 __lasx_xvfcmp_cult_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cult_s(_1, _2);} +-v4i64 __lasx_xvfcmp_cun_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cun_d(_1, _2);} +-v4i64 __lasx_xvfcmp_cune_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_cune_d(_1, _2);} +-v8i32 __lasx_xvfcmp_cune_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cune_s(_1, _2);} +-v8i32 __lasx_xvfcmp_cun_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_cun_s(_1, _2);} +-v4i64 __lasx_xvfcmp_saf_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_saf_d(_1, _2);} +-v8i32 __lasx_xvfcmp_saf_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_saf_s(_1, _2);} +-v4i64 __lasx_xvfcmp_seq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_seq_d(_1, _2);} +-v8i32 __lasx_xvfcmp_seq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_seq_s(_1, _2);} +-v4i64 __lasx_xvfcmp_sle_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sle_d(_1, _2);} +-v8i32 __lasx_xvfcmp_sle_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sle_s(_1, _2);} +-v4i64 __lasx_xvfcmp_slt_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_slt_d(_1, _2);} +-v8i32 __lasx_xvfcmp_slt_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_slt_s(_1, _2);} +-v4i64 __lasx_xvfcmp_sne_d(v4f64 _1, v4f64 _2){return 
__builtin_lasx_xvfcmp_sne_d(_1, _2);} +-v8i32 __lasx_xvfcmp_sne_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sne_s(_1, _2);} +-v4i64 __lasx_xvfcmp_sor_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sor_d(_1, _2);} +-v8i32 __lasx_xvfcmp_sor_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sor_s(_1, _2);} +-v4i64 __lasx_xvfcmp_sueq_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sueq_d(_1, _2);} +-v8i32 __lasx_xvfcmp_sueq_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sueq_s(_1, _2);} +-v4i64 __lasx_xvfcmp_sule_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sule_d(_1, _2);} +-v8i32 __lasx_xvfcmp_sule_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sule_s(_1, _2);} +-v4i64 __lasx_xvfcmp_sult_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sult_d(_1, _2);} +-v8i32 __lasx_xvfcmp_sult_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sult_s(_1, _2);} +-v4i64 __lasx_xvfcmp_sun_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sun_d(_1, _2);} +-v4i64 __lasx_xvfcmp_sune_d(v4f64 _1, v4f64 _2){return __builtin_lasx_xvfcmp_sune_d(_1, _2);} +-v8i32 __lasx_xvfcmp_sune_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sune_s(_1, _2);} +-v8i32 __lasx_xvfcmp_sun_s(v8f32 _1, v8f32 _2){return __builtin_lasx_xvfcmp_sun_s(_1, _2);} +-v4f64 __lasx_xvpickve_d_f(v4f64 _1){return __builtin_lasx_xvpickve_d_f(_1, 1);} +-v8f32 __lasx_xvpickve_w_f(v8f32 _1){return __builtin_lasx_xvpickve_w_f(_1, 1);} +-v32i8 __lasx_xvrepli_b(){return __builtin_lasx_xvrepli_b(1);} +-v4i64 __lasx_xvrepli_d(){return __builtin_lasx_xvrepli_d(1);} +-v16i16 __lasx_xvrepli_h(){return __builtin_lasx_xvrepli_h(1);} +-v8i32 __lasx_xvrepli_w(){return __builtin_lasx_xvrepli_w(1);} ++v32i8 ++__lasx_xvsll_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsll_b (_1, _2); ++} ++v16i16 ++__lasx_xvsll_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsll_h (_1, _2); ++} ++v8i32 ++__lasx_xvsll_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsll_w (_1, _2); ++} ++v4i64 ++__lasx_xvsll_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsll_d (_1, _2); ++} ++v32i8 ++__lasx_xvslli_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvslli_b (_1, 1); ++} ++v16i16 ++__lasx_xvslli_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvslli_h (_1, 1); ++} ++v8i32 ++__lasx_xvslli_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvslli_w (_1, 1); ++} ++v4i64 ++__lasx_xvslli_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvslli_d (_1, 1); ++} ++v32i8 ++__lasx_xvsra_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsra_b (_1, _2); ++} ++v16i16 ++__lasx_xvsra_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsra_h (_1, _2); ++} ++v8i32 ++__lasx_xvsra_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsra_w (_1, _2); ++} ++v4i64 ++__lasx_xvsra_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsra_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrai_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsrai_b (_1, 1); ++} ++v16i16 ++__lasx_xvsrai_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsrai_h (_1, 1); ++} ++v8i32 ++__lasx_xvsrai_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsrai_w (_1, 1); ++} ++v4i64 ++__lasx_xvsrai_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsrai_d (_1, 1); ++} ++v32i8 ++__lasx_xvsrar_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrar_b (_1, _2); ++} ++v16i16 ++__lasx_xvsrar_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrar_h (_1, _2); ++} ++v8i32 ++__lasx_xvsrar_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrar_w (_1, _2); ++} ++v4i64 ++__lasx_xvsrar_d (v4i64 _1, v4i64 _2) ++{ ++ return 
__builtin_lasx_xvsrar_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrari_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsrari_b (_1, 1); ++} ++v16i16 ++__lasx_xvsrari_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsrari_h (_1, 1); ++} ++v8i32 ++__lasx_xvsrari_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsrari_w (_1, 1); ++} ++v4i64 ++__lasx_xvsrari_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsrari_d (_1, 1); ++} ++v32i8 ++__lasx_xvsrl_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrl_b (_1, _2); ++} ++v16i16 ++__lasx_xvsrl_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrl_h (_1, _2); ++} ++v8i32 ++__lasx_xvsrl_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrl_w (_1, _2); ++} ++v4i64 ++__lasx_xvsrl_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrl_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrli_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsrli_b (_1, 1); ++} ++v16i16 ++__lasx_xvsrli_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsrli_h (_1, 1); ++} ++v8i32 ++__lasx_xvsrli_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsrli_w (_1, 1); ++} ++v4i64 ++__lasx_xvsrli_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsrli_d (_1, 1); ++} ++v32i8 ++__lasx_xvsrlr_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrlr_b (_1, _2); ++} ++v16i16 ++__lasx_xvsrlr_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrlr_h (_1, _2); ++} ++v8i32 ++__lasx_xvsrlr_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrlr_w (_1, _2); ++} ++v4i64 ++__lasx_xvsrlr_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrlr_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrlri_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsrlri_b (_1, 1); ++} ++v16i16 ++__lasx_xvsrlri_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsrlri_h (_1, 1); ++} ++v8i32 ++__lasx_xvsrlri_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsrlri_w (_1, 1); ++} ++v4i64 ++__lasx_xvsrlri_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsrlri_d (_1, 1); ++} ++v32u8 ++__lasx_xvbitclr_b (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvbitclr_b (_1, _2); ++} ++v16u16 ++__lasx_xvbitclr_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvbitclr_h (_1, _2); ++} ++v8u32 ++__lasx_xvbitclr_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvbitclr_w (_1, _2); ++} ++v4u64 ++__lasx_xvbitclr_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvbitclr_d (_1, _2); ++} ++v32u8 ++__lasx_xvbitclri_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvbitclri_b (_1, 1); ++} ++v16u16 ++__lasx_xvbitclri_h (v16u16 _1) ++{ ++ return __builtin_lasx_xvbitclri_h (_1, 1); ++} ++v8u32 ++__lasx_xvbitclri_w (v8u32 _1) ++{ ++ return __builtin_lasx_xvbitclri_w (_1, 1); ++} ++v4u64 ++__lasx_xvbitclri_d (v4u64 _1) ++{ ++ return __builtin_lasx_xvbitclri_d (_1, 1); ++} ++v32u8 ++__lasx_xvbitset_b (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvbitset_b (_1, _2); ++} ++v16u16 ++__lasx_xvbitset_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvbitset_h (_1, _2); ++} ++v8u32 ++__lasx_xvbitset_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvbitset_w (_1, _2); ++} ++v4u64 ++__lasx_xvbitset_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvbitset_d (_1, _2); ++} ++v32u8 ++__lasx_xvbitseti_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvbitseti_b (_1, 1); ++} ++v16u16 ++__lasx_xvbitseti_h (v16u16 _1) ++{ ++ return __builtin_lasx_xvbitseti_h (_1, 1); ++} ++v8u32 ++__lasx_xvbitseti_w (v8u32 _1) ++{ ++ return __builtin_lasx_xvbitseti_w (_1, 1); ++} ++v4u64 ++__lasx_xvbitseti_d (v4u64 _1) ++{ ++ return __builtin_lasx_xvbitseti_d (_1, 1); ++} ++v32u8 ++__lasx_xvbitrev_b (v32u8 _1, v32u8 _2) ++{ ++ return 
__builtin_lasx_xvbitrev_b (_1, _2); ++} ++v16u16 ++__lasx_xvbitrev_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvbitrev_h (_1, _2); ++} ++v8u32 ++__lasx_xvbitrev_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvbitrev_w (_1, _2); ++} ++v4u64 ++__lasx_xvbitrev_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvbitrev_d (_1, _2); ++} ++v32u8 ++__lasx_xvbitrevi_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvbitrevi_b (_1, 1); ++} ++v16u16 ++__lasx_xvbitrevi_h (v16u16 _1) ++{ ++ return __builtin_lasx_xvbitrevi_h (_1, 1); ++} ++v8u32 ++__lasx_xvbitrevi_w (v8u32 _1) ++{ ++ return __builtin_lasx_xvbitrevi_w (_1, 1); ++} ++v4u64 ++__lasx_xvbitrevi_d (v4u64 _1) ++{ ++ return __builtin_lasx_xvbitrevi_d (_1, 1); ++} ++v32i8 ++__lasx_xvadd_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvadd_b (_1, _2); ++} ++v16i16 ++__lasx_xvadd_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvadd_h (_1, _2); ++} ++v8i32 ++__lasx_xvadd_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvadd_w (_1, _2); ++} ++v4i64 ++__lasx_xvadd_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvadd_d (_1, _2); ++} ++v32i8 ++__lasx_xvaddi_bu (v32i8 _1) ++{ ++ return __builtin_lasx_xvaddi_bu (_1, 1); ++} ++v16i16 ++__lasx_xvaddi_hu (v16i16 _1) ++{ ++ return __builtin_lasx_xvaddi_hu (_1, 1); ++} ++v8i32 ++__lasx_xvaddi_wu (v8i32 _1) ++{ ++ return __builtin_lasx_xvaddi_wu (_1, 1); ++} ++v4i64 ++__lasx_xvaddi_du (v4i64 _1) ++{ ++ return __builtin_lasx_xvaddi_du (_1, 1); ++} ++v32i8 ++__lasx_xvsub_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsub_b (_1, _2); ++} ++v16i16 ++__lasx_xvsub_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsub_h (_1, _2); ++} ++v8i32 ++__lasx_xvsub_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsub_w (_1, _2); ++} ++v4i64 ++__lasx_xvsub_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsub_d (_1, _2); ++} ++v32i8 ++__lasx_xvsubi_bu (v32i8 _1) ++{ ++ return __builtin_lasx_xvsubi_bu (_1, 1); ++} ++v16i16 ++__lasx_xvsubi_hu (v16i16 _1) ++{ ++ return __builtin_lasx_xvsubi_hu (_1, 1); ++} ++v8i32 ++__lasx_xvsubi_wu (v8i32 _1) ++{ ++ return __builtin_lasx_xvsubi_wu (_1, 1); ++} ++v4i64 ++__lasx_xvsubi_du (v4i64 _1) ++{ ++ return __builtin_lasx_xvsubi_du (_1, 1); ++} ++v32i8 ++__lasx_xvmax_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmax_b (_1, _2); ++} ++v16i16 ++__lasx_xvmax_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmax_h (_1, _2); ++} ++v8i32 ++__lasx_xvmax_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmax_w (_1, _2); ++} ++v4i64 ++__lasx_xvmax_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmax_d (_1, _2); ++} ++v32i8 ++__lasx_xvmaxi_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmaxi_b (_1, 1); ++} ++v16i16 ++__lasx_xvmaxi_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvmaxi_h (_1, 1); ++} ++v8i32 ++__lasx_xvmaxi_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvmaxi_w (_1, 1); ++} ++v4i64 ++__lasx_xvmaxi_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvmaxi_d (_1, 1); ++} ++v32u8 ++__lasx_xvmax_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmax_bu (_1, _2); ++} ++v16u16 ++__lasx_xvmax_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmax_hu (_1, _2); ++} ++v8u32 ++__lasx_xvmax_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmax_wu (_1, _2); ++} ++v4u64 ++__lasx_xvmax_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmax_du (_1, _2); ++} ++v32u8 ++__lasx_xvmaxi_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvmaxi_bu (_1, 1); ++} ++v16u16 ++__lasx_xvmaxi_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvmaxi_hu (_1, 
1); ++} ++v8u32 ++__lasx_xvmaxi_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvmaxi_wu (_1, 1); ++} ++v4u64 ++__lasx_xvmaxi_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvmaxi_du (_1, 1); ++} ++v32i8 ++__lasx_xvmin_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmin_b (_1, _2); ++} ++v16i16 ++__lasx_xvmin_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmin_h (_1, _2); ++} ++v8i32 ++__lasx_xvmin_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmin_w (_1, _2); ++} ++v4i64 ++__lasx_xvmin_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmin_d (_1, _2); ++} ++v32i8 ++__lasx_xvmini_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmini_b (_1, 1); ++} ++v16i16 ++__lasx_xvmini_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvmini_h (_1, 1); ++} ++v8i32 ++__lasx_xvmini_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvmini_w (_1, 1); ++} ++v4i64 ++__lasx_xvmini_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvmini_d (_1, 1); ++} ++v32u8 ++__lasx_xvmin_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmin_bu (_1, _2); ++} ++v16u16 ++__lasx_xvmin_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmin_hu (_1, _2); ++} ++v8u32 ++__lasx_xvmin_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmin_wu (_1, _2); ++} ++v4u64 ++__lasx_xvmin_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmin_du (_1, _2); ++} ++v32u8 ++__lasx_xvmini_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvmini_bu (_1, 1); ++} ++v16u16 ++__lasx_xvmini_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvmini_hu (_1, 1); ++} ++v8u32 ++__lasx_xvmini_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvmini_wu (_1, 1); ++} ++v4u64 ++__lasx_xvmini_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvmini_du (_1, 1); ++} ++v32i8 ++__lasx_xvseq_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvseq_b (_1, _2); ++} ++v16i16 ++__lasx_xvseq_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvseq_h (_1, _2); ++} ++v8i32 ++__lasx_xvseq_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvseq_w (_1, _2); ++} ++v4i64 ++__lasx_xvseq_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvseq_d (_1, _2); ++} ++v32i8 ++__lasx_xvseqi_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvseqi_b (_1, 1); ++} ++v16i16 ++__lasx_xvseqi_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvseqi_h (_1, 1); ++} ++v8i32 ++__lasx_xvseqi_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvseqi_w (_1, 1); ++} ++v4i64 ++__lasx_xvseqi_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvseqi_d (_1, 1); ++} ++v32i8 ++__lasx_xvslt_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvslt_b (_1, _2); ++} ++v16i16 ++__lasx_xvslt_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvslt_h (_1, _2); ++} ++v8i32 ++__lasx_xvslt_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvslt_w (_1, _2); ++} ++v4i64 ++__lasx_xvslt_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvslt_d (_1, _2); ++} ++v32i8 ++__lasx_xvslti_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvslti_b (_1, 1); ++} ++v16i16 ++__lasx_xvslti_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvslti_h (_1, 1); ++} ++v8i32 ++__lasx_xvslti_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvslti_w (_1, 1); ++} ++v4i64 ++__lasx_xvslti_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvslti_d (_1, 1); ++} ++v32i8 ++__lasx_xvslt_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvslt_bu (_1, _2); ++} ++v16i16 ++__lasx_xvslt_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvslt_hu (_1, _2); ++} ++v8i32 ++__lasx_xvslt_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvslt_wu (_1, _2); ++} ++v4i64 ++__lasx_xvslt_du (v4u64 _1, v4u64 _2) ++{ ++ return 
__builtin_lasx_xvslt_du (_1, _2); ++} ++v32i8 ++__lasx_xvslti_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvslti_bu (_1, 1); ++} ++v16i16 ++__lasx_xvslti_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvslti_hu (_1, 1); ++} ++v8i32 ++__lasx_xvslti_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvslti_wu (_1, 1); ++} ++v4i64 ++__lasx_xvslti_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvslti_du (_1, 1); ++} ++v32i8 ++__lasx_xvsle_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsle_b (_1, _2); ++} ++v16i16 ++__lasx_xvsle_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsle_h (_1, _2); ++} ++v8i32 ++__lasx_xvsle_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsle_w (_1, _2); ++} ++v4i64 ++__lasx_xvsle_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsle_d (_1, _2); ++} ++v32i8 ++__lasx_xvslei_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvslei_b (_1, 1); ++} ++v16i16 ++__lasx_xvslei_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvslei_h (_1, 1); ++} ++v8i32 ++__lasx_xvslei_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvslei_w (_1, 1); ++} ++v4i64 ++__lasx_xvslei_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvslei_d (_1, 1); ++} ++v32i8 ++__lasx_xvsle_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvsle_bu (_1, _2); ++} ++v16i16 ++__lasx_xvsle_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvsle_hu (_1, _2); ++} ++v8i32 ++__lasx_xvsle_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvsle_wu (_1, _2); ++} ++v4i64 ++__lasx_xvsle_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvsle_du (_1, _2); ++} ++v32i8 ++__lasx_xvslei_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvslei_bu (_1, 1); ++} ++v16i16 ++__lasx_xvslei_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvslei_hu (_1, 1); ++} ++v8i32 ++__lasx_xvslei_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvslei_wu (_1, 1); ++} ++v4i64 ++__lasx_xvslei_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvslei_du (_1, 1); ++} ++v32i8 ++__lasx_xvsat_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsat_b (_1, 1); ++} ++v16i16 ++__lasx_xvsat_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsat_h (_1, 1); ++} ++v8i32 ++__lasx_xvsat_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsat_w (_1, 1); ++} ++v4i64 ++__lasx_xvsat_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvsat_d (_1, 1); ++} ++v32u8 ++__lasx_xvsat_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvsat_bu (_1, 1); ++} ++v16u16 ++__lasx_xvsat_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvsat_hu (_1, 1); ++} ++v8u32 ++__lasx_xvsat_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvsat_wu (_1, 1); ++} ++v4u64 ++__lasx_xvsat_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvsat_du (_1, 1); ++} ++v32i8 ++__lasx_xvadda_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvadda_b (_1, _2); ++} ++v16i16 ++__lasx_xvadda_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvadda_h (_1, _2); ++} ++v8i32 ++__lasx_xvadda_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvadda_w (_1, _2); ++} ++v4i64 ++__lasx_xvadda_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvadda_d (_1, _2); ++} ++v32i8 ++__lasx_xvsadd_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsadd_b (_1, _2); ++} ++v16i16 ++__lasx_xvsadd_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsadd_h (_1, _2); ++} ++v8i32 ++__lasx_xvsadd_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsadd_w (_1, _2); ++} ++v4i64 ++__lasx_xvsadd_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsadd_d (_1, _2); ++} ++v32u8 ++__lasx_xvsadd_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvsadd_bu (_1, _2); ++} ++v16u16 ++__lasx_xvsadd_hu (v16u16 _1, v16u16 
_2) ++{ ++ return __builtin_lasx_xvsadd_hu (_1, _2); ++} ++v8u32 ++__lasx_xvsadd_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvsadd_wu (_1, _2); ++} ++v4u64 ++__lasx_xvsadd_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvsadd_du (_1, _2); ++} ++v32i8 ++__lasx_xvavg_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvavg_b (_1, _2); ++} ++v16i16 ++__lasx_xvavg_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvavg_h (_1, _2); ++} ++v8i32 ++__lasx_xvavg_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvavg_w (_1, _2); ++} ++v4i64 ++__lasx_xvavg_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvavg_d (_1, _2); ++} ++v32u8 ++__lasx_xvavg_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvavg_bu (_1, _2); ++} ++v16u16 ++__lasx_xvavg_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvavg_hu (_1, _2); ++} ++v8u32 ++__lasx_xvavg_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvavg_wu (_1, _2); ++} ++v4u64 ++__lasx_xvavg_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvavg_du (_1, _2); ++} ++v32i8 ++__lasx_xvavgr_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvavgr_b (_1, _2); ++} ++v16i16 ++__lasx_xvavgr_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvavgr_h (_1, _2); ++} ++v8i32 ++__lasx_xvavgr_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvavgr_w (_1, _2); ++} ++v4i64 ++__lasx_xvavgr_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvavgr_d (_1, _2); ++} ++v32u8 ++__lasx_xvavgr_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvavgr_bu (_1, _2); ++} ++v16u16 ++__lasx_xvavgr_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvavgr_hu (_1, _2); ++} ++v8u32 ++__lasx_xvavgr_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvavgr_wu (_1, _2); ++} ++v4u64 ++__lasx_xvavgr_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvavgr_du (_1, _2); ++} ++v32i8 ++__lasx_xvssub_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssub_b (_1, _2); ++} ++v16i16 ++__lasx_xvssub_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssub_h (_1, _2); ++} ++v8i32 ++__lasx_xvssub_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssub_w (_1, _2); ++} ++v4i64 ++__lasx_xvssub_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssub_d (_1, _2); ++} ++v32u8 ++__lasx_xvssub_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvssub_bu (_1, _2); ++} ++v16u16 ++__lasx_xvssub_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssub_hu (_1, _2); ++} ++v8u32 ++__lasx_xvssub_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssub_wu (_1, _2); ++} ++v4u64 ++__lasx_xvssub_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssub_du (_1, _2); ++} ++v32i8 ++__lasx_xvabsd_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvabsd_b (_1, _2); ++} ++v16i16 ++__lasx_xvabsd_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvabsd_h (_1, _2); ++} ++v8i32 ++__lasx_xvabsd_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvabsd_w (_1, _2); ++} ++v4i64 ++__lasx_xvabsd_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvabsd_d (_1, _2); ++} ++v32u8 ++__lasx_xvabsd_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvabsd_bu (_1, _2); ++} ++v16u16 ++__lasx_xvabsd_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvabsd_hu (_1, _2); ++} ++v8u32 ++__lasx_xvabsd_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvabsd_wu (_1, _2); ++} ++v4u64 ++__lasx_xvabsd_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvabsd_du (_1, _2); ++} ++v32i8 ++__lasx_xvmul_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmul_b (_1, 
_2); ++} ++v16i16 ++__lasx_xvmul_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmul_h (_1, _2); ++} ++v8i32 ++__lasx_xvmul_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmul_w (_1, _2); ++} ++v4i64 ++__lasx_xvmul_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmul_d (_1, _2); ++} ++v32i8 ++__lasx_xvmadd_b (v32i8 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmadd_b (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmadd_h (v16i16 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmadd_h (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmadd_w (v8i32 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmadd_w (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmadd_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmadd_d (_1, _2, _3); ++} ++v32i8 ++__lasx_xvmsub_b (v32i8 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmsub_b (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmsub_h (v16i16 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmsub_h (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmsub_w (v8i32 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmsub_w (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmsub_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmsub_d (_1, _2, _3); ++} ++v32i8 ++__lasx_xvdiv_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvdiv_b (_1, _2); ++} ++v16i16 ++__lasx_xvdiv_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvdiv_h (_1, _2); ++} ++v8i32 ++__lasx_xvdiv_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvdiv_w (_1, _2); ++} ++v4i64 ++__lasx_xvdiv_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvdiv_d (_1, _2); ++} ++v32u8 ++__lasx_xvdiv_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvdiv_bu (_1, _2); ++} ++v16u16 ++__lasx_xvdiv_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvdiv_hu (_1, _2); ++} ++v8u32 ++__lasx_xvdiv_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvdiv_wu (_1, _2); ++} ++v4u64 ++__lasx_xvdiv_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvdiv_du (_1, _2); ++} ++v16i16 ++__lasx_xvhaddw_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvhaddw_h_b (_1, _2); ++} ++v8i32 ++__lasx_xvhaddw_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvhaddw_w_h (_1, _2); ++} ++v4i64 ++__lasx_xvhaddw_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvhaddw_d_w (_1, _2); ++} ++v16u16 ++__lasx_xvhaddw_hu_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvhaddw_hu_bu (_1, _2); ++} ++v8u32 ++__lasx_xvhaddw_wu_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvhaddw_wu_hu (_1, _2); ++} ++v4u64 ++__lasx_xvhaddw_du_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvhaddw_du_wu (_1, _2); ++} ++v16i16 ++__lasx_xvhsubw_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvhsubw_h_b (_1, _2); ++} ++v8i32 ++__lasx_xvhsubw_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvhsubw_w_h (_1, _2); ++} ++v4i64 ++__lasx_xvhsubw_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvhsubw_d_w (_1, _2); ++} ++v16i16 ++__lasx_xvhsubw_hu_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvhsubw_hu_bu (_1, _2); ++} ++v8i32 ++__lasx_xvhsubw_wu_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvhsubw_wu_hu (_1, _2); ++} ++v4i64 ++__lasx_xvhsubw_du_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvhsubw_du_wu (_1, _2); ++} ++v32i8 ++__lasx_xvmod_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmod_b (_1, _2); ++} ++v16i16 ++__lasx_xvmod_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmod_h (_1, _2); ++} ++v8i32 ++__lasx_xvmod_w (v8i32 _1, v8i32 _2) ++{ ++ 
return __builtin_lasx_xvmod_w (_1, _2); ++} ++v4i64 ++__lasx_xvmod_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmod_d (_1, _2); ++} ++v32u8 ++__lasx_xvmod_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmod_bu (_1, _2); ++} ++v16u16 ++__lasx_xvmod_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmod_hu (_1, _2); ++} ++v8u32 ++__lasx_xvmod_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmod_wu (_1, _2); ++} ++v4u64 ++__lasx_xvmod_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmod_du (_1, _2); ++} ++v32i8 ++__lasx_xvrepl128vei_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvrepl128vei_b (_1, 1); ++} ++v16i16 ++__lasx_xvrepl128vei_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvrepl128vei_h (_1, 1); ++} ++v8i32 ++__lasx_xvrepl128vei_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvrepl128vei_w (_1, 1); ++} ++v4i64 ++__lasx_xvrepl128vei_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvrepl128vei_d (_1, 1); ++} ++v32i8 ++__lasx_xvpickev_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpickev_b (_1, _2); ++} ++v16i16 ++__lasx_xvpickev_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvpickev_h (_1, _2); ++} ++v8i32 ++__lasx_xvpickev_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpickev_w (_1, _2); ++} ++v4i64 ++__lasx_xvpickev_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvpickev_d (_1, _2); ++} ++v32i8 ++__lasx_xvpickod_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpickod_b (_1, _2); ++} ++v16i16 ++__lasx_xvpickod_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvpickod_h (_1, _2); ++} ++v8i32 ++__lasx_xvpickod_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpickod_w (_1, _2); ++} ++v4i64 ++__lasx_xvpickod_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvpickod_d (_1, _2); ++} ++v32i8 ++__lasx_xvilvh_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvilvh_b (_1, _2); ++} ++v16i16 ++__lasx_xvilvh_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvilvh_h (_1, _2); ++} ++v8i32 ++__lasx_xvilvh_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvilvh_w (_1, _2); ++} ++v4i64 ++__lasx_xvilvh_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvilvh_d (_1, _2); ++} ++v32i8 ++__lasx_xvilvl_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvilvl_b (_1, _2); ++} ++v16i16 ++__lasx_xvilvl_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvilvl_h (_1, _2); ++} ++v8i32 ++__lasx_xvilvl_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvilvl_w (_1, _2); ++} ++v4i64 ++__lasx_xvilvl_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvilvl_d (_1, _2); ++} ++v32i8 ++__lasx_xvpackev_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpackev_b (_1, _2); ++} ++v16i16 ++__lasx_xvpackev_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvpackev_h (_1, _2); ++} ++v8i32 ++__lasx_xvpackev_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpackev_w (_1, _2); ++} ++v4i64 ++__lasx_xvpackev_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvpackev_d (_1, _2); ++} ++v32i8 ++__lasx_xvpackod_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpackod_b (_1, _2); ++} ++v16i16 ++__lasx_xvpackod_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvpackod_h (_1, _2); ++} ++v8i32 ++__lasx_xvpackod_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpackod_w (_1, _2); ++} ++v4i64 ++__lasx_xvpackod_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvpackod_d (_1, _2); ++} ++v32i8 ++__lasx_xvshuf_b (v32i8 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvshuf_b (_1, _2, _3); ++} ++v16i16 ++__lasx_xvshuf_h (v16i16 _1, 
v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvshuf_h (_1, _2, _3); ++} ++v8i32 ++__lasx_xvshuf_w (v8i32 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvshuf_w (_1, _2, _3); ++} ++v4i64 ++__lasx_xvshuf_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvshuf_d (_1, _2, _3); ++} ++v32u8 ++__lasx_xvand_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvand_v (_1, _2); ++} ++v32u8 ++__lasx_xvandi_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvandi_b (_1, 1); ++} ++v32u8 ++__lasx_xvor_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvor_v (_1, _2); ++} ++v32u8 ++__lasx_xvori_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvori_b (_1, 1); ++} ++v32u8 ++__lasx_xvnor_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvnor_v (_1, _2); ++} ++v32u8 ++__lasx_xvnori_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvnori_b (_1, 1); ++} ++v32u8 ++__lasx_xvxor_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvxor_v (_1, _2); ++} ++v32u8 ++__lasx_xvxori_b (v32u8 _1) ++{ ++ return __builtin_lasx_xvxori_b (_1, 1); ++} ++v32u8 ++__lasx_xvbitsel_v (v32u8 _1, v32u8 _2, v32u8 _3) ++{ ++ return __builtin_lasx_xvbitsel_v (_1, _2, _3); ++} ++v32u8 ++__lasx_xvbitseli_b (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvbitseli_b (_1, _2, 1); ++} ++v32i8 ++__lasx_xvshuf4i_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvshuf4i_b (_1, 1); ++} ++v16i16 ++__lasx_xvshuf4i_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvshuf4i_h (_1, 1); ++} ++v8i32 ++__lasx_xvshuf4i_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvshuf4i_w (_1, 1); ++} ++v32i8 ++__lasx_xvreplgr2vr_b (int _1) ++{ ++ return __builtin_lasx_xvreplgr2vr_b (_1); ++} ++v16i16 ++__lasx_xvreplgr2vr_h (int _1) ++{ ++ return __builtin_lasx_xvreplgr2vr_h (_1); ++} ++v8i32 ++__lasx_xvreplgr2vr_w (int _1) ++{ ++ return __builtin_lasx_xvreplgr2vr_w (_1); ++} ++v4i64 ++__lasx_xvreplgr2vr_d (int _1) ++{ ++ return __builtin_lasx_xvreplgr2vr_d (_1); ++} ++v32i8 ++__lasx_xvpcnt_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvpcnt_b (_1); ++} ++v16i16 ++__lasx_xvpcnt_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvpcnt_h (_1); ++} ++v8i32 ++__lasx_xvpcnt_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvpcnt_w (_1); ++} ++v4i64 ++__lasx_xvpcnt_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvpcnt_d (_1); ++} ++v32i8 ++__lasx_xvclo_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvclo_b (_1); ++} ++v16i16 ++__lasx_xvclo_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvclo_h (_1); ++} ++v8i32 ++__lasx_xvclo_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvclo_w (_1); ++} ++v4i64 ++__lasx_xvclo_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvclo_d (_1); ++} ++v32i8 ++__lasx_xvclz_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvclz_b (_1); ++} ++v16i16 ++__lasx_xvclz_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvclz_h (_1); ++} ++v8i32 ++__lasx_xvclz_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvclz_w (_1); ++} ++v4i64 ++__lasx_xvclz_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvclz_d (_1); ++} ++v8f32 ++__lasx_xvfadd_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfadd_s (_1, _2); ++} ++v4f64 ++__lasx_xvfadd_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfadd_d (_1, _2); ++} ++v8f32 ++__lasx_xvfsub_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfsub_s (_1, _2); ++} ++v4f64 ++__lasx_xvfsub_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfsub_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmul_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmul_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmul_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmul_d (_1, _2); ++} ++v8f32 ++__lasx_xvfdiv_s (v8f32 
_1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfdiv_s (_1, _2); ++} ++v4f64 ++__lasx_xvfdiv_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfdiv_d (_1, _2); ++} ++v16i16 ++__lasx_xvfcvt_h_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcvt_h_s (_1, _2); ++} ++v8f32 ++__lasx_xvfcvt_s_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcvt_s_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmin_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmin_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmin_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmin_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmina_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmina_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmina_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmina_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmax_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmax_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmax_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmax_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmaxa_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfmaxa_s (_1, _2); ++} ++v4f64 ++__lasx_xvfmaxa_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfmaxa_d (_1, _2); ++} ++v8i32 ++__lasx_xvfclass_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfclass_s (_1); ++} ++v4i64 ++__lasx_xvfclass_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfclass_d (_1); ++} ++v8f32 ++__lasx_xvfsqrt_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfsqrt_s (_1); ++} ++v4f64 ++__lasx_xvfsqrt_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfsqrt_d (_1); ++} ++v8f32 ++__lasx_xvfrecip_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrecip_s (_1); ++} ++v4f64 ++__lasx_xvfrecip_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrecip_d (_1); ++} ++v8f32 ++__lasx_xvfrint_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrint_s (_1); ++} ++v4f64 ++__lasx_xvfrint_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrint_d (_1); ++} ++v8f32 ++__lasx_xvfrsqrt_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrsqrt_s (_1); ++} ++v4f64 ++__lasx_xvfrsqrt_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrsqrt_d (_1); ++} ++v8f32 ++__lasx_xvflogb_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvflogb_s (_1); ++} ++v4f64 ++__lasx_xvflogb_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvflogb_d (_1); ++} ++v8f32 ++__lasx_xvfcvth_s_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvfcvth_s_h (_1); ++} ++v4f64 ++__lasx_xvfcvth_d_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfcvth_d_s (_1); ++} ++v8f32 ++__lasx_xvfcvtl_s_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvfcvtl_s_h (_1); ++} ++v4f64 ++__lasx_xvfcvtl_d_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfcvtl_d_s (_1); ++} ++v8i32 ++__lasx_xvftint_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftint_w_s (_1); ++} ++v4i64 ++__lasx_xvftint_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftint_l_d (_1); ++} ++v8u32 ++__lasx_xvftint_wu_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftint_wu_s (_1); ++} ++v4u64 ++__lasx_xvftint_lu_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftint_lu_d (_1); ++} ++v8i32 ++__lasx_xvftintrz_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrz_w_s (_1); ++} ++v4i64 ++__lasx_xvftintrz_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrz_l_d (_1); ++} ++v8u32 ++__lasx_xvftintrz_wu_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrz_wu_s (_1); ++} ++v4u64 ++__lasx_xvftintrz_lu_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrz_lu_d (_1); ++} ++v8f32 ++__lasx_xvffint_s_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvffint_s_w (_1); ++} ++v4f64 ++__lasx_xvffint_d_l (v4i64 _1) ++{ ++ return __builtin_lasx_xvffint_d_l (_1); ++} ++v8f32 
++__lasx_xvffint_s_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvffint_s_wu (_1); ++} ++v4f64 ++__lasx_xvffint_d_lu (v4u64 _1) ++{ ++ return __builtin_lasx_xvffint_d_lu (_1); ++} ++v32i8 ++__lasx_xvreplve_b (v32i8 _1, int _2) ++{ ++ return __builtin_lasx_xvreplve_b (_1, _2); ++} ++v16i16 ++__lasx_xvreplve_h (v16i16 _1, int _2) ++{ ++ return __builtin_lasx_xvreplve_h (_1, _2); ++} ++v8i32 ++__lasx_xvreplve_w (v8i32 _1, int _2) ++{ ++ return __builtin_lasx_xvreplve_w (_1, _2); ++} ++v4i64 ++__lasx_xvreplve_d (v4i64 _1, int _2) ++{ ++ return __builtin_lasx_xvreplve_d (_1, _2); ++} ++v8i32 ++__lasx_xvpermi_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvpermi_w (_1, _2, 1); ++} ++v32u8 ++__lasx_xvandn_v (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvandn_v (_1, _2); ++} ++v32i8 ++__lasx_xvneg_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvneg_b (_1); ++} ++v16i16 ++__lasx_xvneg_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvneg_h (_1); ++} ++v8i32 ++__lasx_xvneg_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvneg_w (_1); ++} ++v4i64 ++__lasx_xvneg_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvneg_d (_1); ++} ++v32i8 ++__lasx_xvmuh_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmuh_b (_1, _2); ++} ++v16i16 ++__lasx_xvmuh_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmuh_h (_1, _2); ++} ++v8i32 ++__lasx_xvmuh_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmuh_w (_1, _2); ++} ++v4i64 ++__lasx_xvmuh_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmuh_d (_1, _2); ++} ++v32u8 ++__lasx_xvmuh_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmuh_bu (_1, _2); ++} ++v16u16 ++__lasx_xvmuh_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmuh_hu (_1, _2); ++} ++v8u32 ++__lasx_xvmuh_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmuh_wu (_1, _2); ++} ++v4u64 ++__lasx_xvmuh_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmuh_du (_1, _2); ++} ++v16i16 ++__lasx_xvsllwil_h_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvsllwil_h_b (_1, 1); ++} ++v8i32 ++__lasx_xvsllwil_w_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvsllwil_w_h (_1, 1); ++} ++v4i64 ++__lasx_xvsllwil_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvsllwil_d_w (_1, 1); ++} ++v16u16 ++__lasx_xvsllwil_hu_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvsllwil_hu_bu (_1, 1); ++} ++v8u32 ++__lasx_xvsllwil_wu_hu (v16u16 _1) ++{ ++ return __builtin_lasx_xvsllwil_wu_hu (_1, 1); ++} ++v4u64 ++__lasx_xvsllwil_du_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvsllwil_du_wu (_1, 1); ++} ++v32i8 ++__lasx_xvsran_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsran_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvsran_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsran_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvsran_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsran_w_d (_1, _2); ++} ++v32i8 ++__lasx_xvssran_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssran_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvssran_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssran_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvssran_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssran_w_d (_1, _2); ++} ++v32u8 ++__lasx_xvssran_bu_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssran_bu_h (_1, _2); ++} ++v16u16 ++__lasx_xvssran_hu_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssran_hu_w (_1, _2); ++} ++v8u32 ++__lasx_xvssran_wu_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssran_wu_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrarn_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrarn_b_h (_1, 
_2); ++} ++v16i16 ++__lasx_xvsrarn_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrarn_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvsrarn_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrarn_w_d (_1, _2); ++} ++v32i8 ++__lasx_xvssrarn_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrarn_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvssrarn_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrarn_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvssrarn_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrarn_w_d (_1, _2); ++} ++v32u8 ++__lasx_xvssrarn_bu_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssrarn_bu_h (_1, _2); ++} ++v16u16 ++__lasx_xvssrarn_hu_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssrarn_hu_w (_1, _2); ++} ++v8u32 ++__lasx_xvssrarn_wu_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssrarn_wu_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrln_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrln_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvsrln_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrln_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvsrln_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrln_w_d (_1, _2); ++} ++v32u8 ++__lasx_xvssrln_bu_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssrln_bu_h (_1, _2); ++} ++v16u16 ++__lasx_xvssrln_hu_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssrln_hu_w (_1, _2); ++} ++v8u32 ++__lasx_xvssrln_wu_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssrln_wu_d (_1, _2); ++} ++v32i8 ++__lasx_xvsrlrn_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrlrn_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvsrlrn_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrlrn_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvsrlrn_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrlrn_w_d (_1, _2); ++} ++v32u8 ++__lasx_xvssrlrn_bu_h (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvssrlrn_bu_h (_1, _2); ++} ++v16u16 ++__lasx_xvssrlrn_hu_w (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvssrlrn_hu_w (_1, _2); ++} ++v8u32 ++__lasx_xvssrlrn_wu_d (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvssrlrn_wu_d (_1, _2); ++} ++v32i8 ++__lasx_xvfrstpi_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvfrstpi_b (_1, _2, 1); ++} ++v16i16 ++__lasx_xvfrstpi_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvfrstpi_h (_1, _2, 1); ++} ++v32i8 ++__lasx_xvfrstp_b (v32i8 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvfrstp_b (_1, _2, _3); ++} ++v16i16 ++__lasx_xvfrstp_h (v16i16 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvfrstp_h (_1, _2, _3); ++} ++v4i64 ++__lasx_xvshuf4i_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvshuf4i_d (_1, _2, 1); ++} ++v32i8 ++__lasx_xvbsrl_v (v32i8 _1) ++{ ++ return __builtin_lasx_xvbsrl_v (_1, 1); ++} ++v32i8 ++__lasx_xvbsll_v (v32i8 _1) ++{ ++ return __builtin_lasx_xvbsll_v (_1, 1); ++} ++v32i8 ++__lasx_xvextrins_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvextrins_b (_1, _2, 1); ++} ++v16i16 ++__lasx_xvextrins_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvextrins_h (_1, _2, 1); ++} ++v8i32 ++__lasx_xvextrins_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvextrins_w (_1, _2, 1); ++} ++v4i64 ++__lasx_xvextrins_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvextrins_d (_1, _2, 1); ++} ++v32i8 ++__lasx_xvmskltz_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmskltz_b (_1); ++} ++v16i16 ++__lasx_xvmskltz_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvmskltz_h (_1); ++} ++v8i32 ++__lasx_xvmskltz_w (v8i32 _1) ++{ ++ 
return __builtin_lasx_xvmskltz_w (_1); ++} ++v4i64 ++__lasx_xvmskltz_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvmskltz_d (_1); ++} ++v32i8 ++__lasx_xvsigncov_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsigncov_b (_1, _2); ++} ++v16i16 ++__lasx_xvsigncov_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsigncov_h (_1, _2); ++} ++v8i32 ++__lasx_xvsigncov_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsigncov_w (_1, _2); ++} ++v4i64 ++__lasx_xvsigncov_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsigncov_d (_1, _2); ++} ++v8f32 ++__lasx_xvfmadd_s (v8f32 _1, v8f32 _2, v8f32 _3) ++{ ++ return __builtin_lasx_xvfmadd_s (_1, _2, _3); ++} ++v4f64 ++__lasx_xvfmadd_d (v4f64 _1, v4f64 _2, v4f64 _3) ++{ ++ return __builtin_lasx_xvfmadd_d (_1, _2, _3); ++} ++v8f32 ++__lasx_xvfmsub_s (v8f32 _1, v8f32 _2, v8f32 _3) ++{ ++ return __builtin_lasx_xvfmsub_s (_1, _2, _3); ++} ++v4f64 ++__lasx_xvfmsub_d (v4f64 _1, v4f64 _2, v4f64 _3) ++{ ++ return __builtin_lasx_xvfmsub_d (_1, _2, _3); ++} ++v8f32 ++__lasx_xvfnmadd_s (v8f32 _1, v8f32 _2, v8f32 _3) ++{ ++ return __builtin_lasx_xvfnmadd_s (_1, _2, _3); ++} ++v4f64 ++__lasx_xvfnmadd_d (v4f64 _1, v4f64 _2, v4f64 _3) ++{ ++ return __builtin_lasx_xvfnmadd_d (_1, _2, _3); ++} ++v8f32 ++__lasx_xvfnmsub_s (v8f32 _1, v8f32 _2, v8f32 _3) ++{ ++ return __builtin_lasx_xvfnmsub_s (_1, _2, _3); ++} ++v4f64 ++__lasx_xvfnmsub_d (v4f64 _1, v4f64 _2, v4f64 _3) ++{ ++ return __builtin_lasx_xvfnmsub_d (_1, _2, _3); ++} ++v8i32 ++__lasx_xvftintrne_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrne_w_s (_1); ++} ++v4i64 ++__lasx_xvftintrne_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrne_l_d (_1); ++} ++v8i32 ++__lasx_xvftintrp_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrp_w_s (_1); ++} ++v4i64 ++__lasx_xvftintrp_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrp_l_d (_1); ++} ++v8i32 ++__lasx_xvftintrm_w_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrm_w_s (_1); ++} ++v4i64 ++__lasx_xvftintrm_l_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvftintrm_l_d (_1); ++} ++v8i32 ++__lasx_xvftint_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftint_w_d (_1, _2); ++} ++v8f32 ++__lasx_xvffint_s_l (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvffint_s_l (_1, _2); ++} ++v8i32 ++__lasx_xvftintrz_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftintrz_w_d (_1, _2); ++} ++v8i32 ++__lasx_xvftintrp_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftintrp_w_d (_1, _2); ++} ++v8i32 ++__lasx_xvftintrm_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftintrm_w_d (_1, _2); ++} ++v8i32 ++__lasx_xvftintrne_w_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvftintrne_w_d (_1, _2); ++} ++v4i64 ++__lasx_xvftinth_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftinth_l_s (_1); ++} ++v4i64 ++__lasx_xvftintl_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintl_l_s (_1); ++} ++v4f64 ++__lasx_xvffinth_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvffinth_d_w (_1); ++} ++v4f64 ++__lasx_xvffintl_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvffintl_d_w (_1); ++} ++v4i64 ++__lasx_xvftintrzh_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrzh_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrzl_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrzl_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrph_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrph_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrpl_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrpl_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrmh_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrmh_l_s 
(_1); ++} ++v4i64 ++__lasx_xvftintrml_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrml_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrneh_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrneh_l_s (_1); ++} ++v4i64 ++__lasx_xvftintrnel_l_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvftintrnel_l_s (_1); ++} ++v8f32 ++__lasx_xvfrintrne_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrintrne_s (_1); ++} ++v4f64 ++__lasx_xvfrintrne_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrintrne_d (_1); ++} ++v8f32 ++__lasx_xvfrintrz_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrintrz_s (_1); ++} ++v4f64 ++__lasx_xvfrintrz_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrintrz_d (_1); ++} ++v8f32 ++__lasx_xvfrintrp_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrintrp_s (_1); ++} ++v4f64 ++__lasx_xvfrintrp_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrintrp_d (_1); ++} ++v8f32 ++__lasx_xvfrintrm_s (v8f32 _1) ++{ ++ return __builtin_lasx_xvfrintrm_s (_1); ++} ++v4f64 ++__lasx_xvfrintrm_d (v4f64 _1) ++{ ++ return __builtin_lasx_xvfrintrm_d (_1); ++} ++v32i8 ++__lasx_xvld (void *_1) ++{ ++ return __builtin_lasx_xvld (_1, 1); ++} ++void ++__lasx_xvst (v32i8 _1, void *_2) ++{ ++ return __builtin_lasx_xvst (_1, _2, 1); ++} ++void ++__lasx_xvstelm_b (v32i8 _1, void *_2) ++{ ++ return __builtin_lasx_xvstelm_b (_1, _2, 1, 1); ++} ++void ++__lasx_xvstelm_h (v16i16 _1, void *_2) ++{ ++ return __builtin_lasx_xvstelm_h (_1, _2, 2, 1); ++} ++void ++__lasx_xvstelm_w (v8i32 _1, void *_2) ++{ ++ return __builtin_lasx_xvstelm_w (_1, _2, 4, 1); ++} ++void ++__lasx_xvstelm_d (v4i64 _1, void *_2) ++{ ++ return __builtin_lasx_xvstelm_d (_1, _2, 8, 1); ++} ++v8i32 ++__lasx_xvinsve0_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvinsve0_w (_1, _2, 1); ++} ++v4i64 ++__lasx_xvinsve0_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvinsve0_d (_1, _2, 1); ++} ++v8i32 ++__lasx_xvpickve_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvpickve_w (_1, 1); ++} ++v4i64 ++__lasx_xvpickve_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvpickve_d (_1, 1); ++} ++v32i8 ++__lasx_xvssrlrn_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlrn_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvssrlrn_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlrn_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvssrlrn_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlrn_w_d (_1, _2); ++} ++v32i8 ++__lasx_xvssrln_b_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrln_b_h (_1, _2); ++} ++v16i16 ++__lasx_xvssrln_h_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrln_h_w (_1, _2); ++} ++v8i32 ++__lasx_xvssrln_w_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrln_w_d (_1, _2); ++} ++v32i8 ++__lasx_xvorn_v (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvorn_v (_1, _2); ++} ++v4i64 ++__lasx_xvldi () ++{ ++ return __builtin_lasx_xvldi (1); ++} ++v32i8 ++__lasx_xvldx (void *_1) ++{ ++ return __builtin_lasx_xvldx (_1, 1); ++} ++void ++__lasx_xvstx (v32i8 _1, void *_2) ++{ ++ return __builtin_lasx_xvstx (_1, _2, 1); ++} ++v4u64 ++__lasx_xvextl_qu_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvextl_qu_du (_1); ++} ++v8i32 ++__lasx_xvinsgr2vr_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvinsgr2vr_w (_1, 1, 1); ++} ++v4i64 ++__lasx_xvinsgr2vr_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvinsgr2vr_d (_1, 1, 1); ++} ++v32i8 ++__lasx_xvreplve0_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvreplve0_b (_1); ++} ++v16i16 ++__lasx_xvreplve0_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvreplve0_h (_1); ++} ++v8i32 ++__lasx_xvreplve0_w (v8i32 _1) ++{ ++ return 
__builtin_lasx_xvreplve0_w (_1); ++} ++v4i64 ++__lasx_xvreplve0_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvreplve0_d (_1); ++} ++v32i8 ++__lasx_xvreplve0_q (v32i8 _1) ++{ ++ return __builtin_lasx_xvreplve0_q (_1); ++} ++v16i16 ++__lasx_vext2xv_h_b (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_h_b (_1); ++} ++v8i32 ++__lasx_vext2xv_w_h (v16i16 _1) ++{ ++ return __builtin_lasx_vext2xv_w_h (_1); ++} ++v4i64 ++__lasx_vext2xv_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_vext2xv_d_w (_1); ++} ++v8i32 ++__lasx_vext2xv_w_b (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_w_b (_1); ++} ++v4i64 ++__lasx_vext2xv_d_h (v16i16 _1) ++{ ++ return __builtin_lasx_vext2xv_d_h (_1); ++} ++v4i64 ++__lasx_vext2xv_d_b (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_d_b (_1); ++} ++v16i16 ++__lasx_vext2xv_hu_bu (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_hu_bu (_1); ++} ++v8i32 ++__lasx_vext2xv_wu_hu (v16i16 _1) ++{ ++ return __builtin_lasx_vext2xv_wu_hu (_1); ++} ++v4i64 ++__lasx_vext2xv_du_wu (v8i32 _1) ++{ ++ return __builtin_lasx_vext2xv_du_wu (_1); ++} ++v8i32 ++__lasx_vext2xv_wu_bu (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_wu_bu (_1); ++} ++v4i64 ++__lasx_vext2xv_du_hu (v16i16 _1) ++{ ++ return __builtin_lasx_vext2xv_du_hu (_1); ++} ++v4i64 ++__lasx_vext2xv_du_bu (v32i8 _1) ++{ ++ return __builtin_lasx_vext2xv_du_bu (_1); ++} ++v32i8 ++__lasx_xvpermi_q (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvpermi_q (_1, _2, 1); ++} ++v4i64 ++__lasx_xvpermi_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvpermi_d (_1, 1); ++} ++v8i32 ++__lasx_xvperm_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvperm_w (_1, _2); ++} ++v32i8 ++__lasx_xvldrepl_b (void *_1) ++{ ++ return __builtin_lasx_xvldrepl_b (_1, 1); ++} ++v16i16 ++__lasx_xvldrepl_h (void *_1) ++{ ++ return __builtin_lasx_xvldrepl_h (_1, 2); ++} ++v8i32 ++__lasx_xvldrepl_w (void *_1) ++{ ++ return __builtin_lasx_xvldrepl_w (_1, 4); ++} ++v4i64 ++__lasx_xvldrepl_d (void *_1) ++{ ++ return __builtin_lasx_xvldrepl_d (_1, 8); ++} ++int ++__lasx_xvpickve2gr_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvpickve2gr_w (_1, 1); ++} ++unsigned int ++__lasx_xvpickve2gr_wu (v8i32 _1) ++{ ++ return __builtin_lasx_xvpickve2gr_wu (_1, 1); ++} ++long ++__lasx_xvpickve2gr_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvpickve2gr_d (_1, 1); ++} ++unsigned long int ++__lasx_xvpickve2gr_du (v4i64 _1) ++{ ++ return __builtin_lasx_xvpickve2gr_du (_1, 1); ++} ++v4i64 ++__lasx_xvaddwev_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvaddwev_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvaddwev_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvaddwev_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvaddwev_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvaddwev_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvaddwev_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvaddwev_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvaddwev_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvaddwev_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvaddwev_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvaddwev_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvaddwev_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvsubwev_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsubwev_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvsubwev_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsubwev_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvsubwev_w_h (v16i16 _1, v16i16 _2) 
++{ ++ return __builtin_lasx_xvsubwev_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvsubwev_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsubwev_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvsubwev_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvsubwev_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvsubwev_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvsubwev_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvsubwev_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvsubwev_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvsubwev_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvsubwev_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmulwev_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmulwev_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvmulwev_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmulwev_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvmulwev_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmulwev_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmulwev_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmulwev_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvmulwev_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmulwev_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvmulwev_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmulwev_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvaddwod_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvaddwod_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvaddwod_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvaddwod_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvaddwod_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvaddwod_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvaddwod_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvaddwod_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvaddwod_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvaddwod_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvaddwod_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvaddwod_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvsubwod_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsubwod_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvsubwod_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsubwod_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvsubwod_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsubwod_w_h (_1, _2); ++} ++v16i16 ++__lasx_xvsubwod_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsubwod_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvsubwod_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvsubwod_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvsubwod_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvsubwod_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvsubwod_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvsubwod_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvsubwod_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvsubwod_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmulwod_q_d (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_d_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmulwod_d_w (_1, _2); ++} ++v8i32 ++__lasx_xvmulwod_w_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmulwod_w_h (_1, _2); ++} ++v16i16 
++__lasx_xvmulwod_h_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmulwod_h_b (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_q_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvmulwod_q_du (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_d_wu (v8u32 _1, v8u32 _2) ++{ ++ return __builtin_lasx_xvmulwod_d_wu (_1, _2); ++} ++v8i32 ++__lasx_xvmulwod_w_hu (v16u16 _1, v16u16 _2) ++{ ++ return __builtin_lasx_xvmulwod_w_hu (_1, _2); ++} ++v16i16 ++__lasx_xvmulwod_h_bu (v32u8 _1, v32u8 _2) ++{ ++ return __builtin_lasx_xvmulwod_h_bu (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_d_wu_w (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvaddwev_d_wu_w (_1, _2); ++} ++v8i32 ++__lasx_xvaddwev_w_hu_h (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvaddwev_w_hu_h (_1, _2); ++} ++v16i16 ++__lasx_xvaddwev_h_bu_b (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvaddwev_h_bu_b (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_d_wu_w (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmulwev_d_wu_w (_1, _2); ++} ++v8i32 ++__lasx_xvmulwev_w_hu_h (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmulwev_w_hu_h (_1, _2); ++} ++v16i16 ++__lasx_xvmulwev_h_bu_b (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmulwev_h_bu_b (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_d_wu_w (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvaddwod_d_wu_w (_1, _2); ++} ++v8i32 ++__lasx_xvaddwod_w_hu_h (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvaddwod_w_hu_h (_1, _2); ++} ++v16i16 ++__lasx_xvaddwod_h_bu_b (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvaddwod_h_bu_b (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_d_wu_w (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvmulwod_d_wu_w (_1, _2); ++} ++v8i32 ++__lasx_xvmulwod_w_hu_h (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvmulwod_w_hu_h (_1, _2); ++} ++v16i16 ++__lasx_xvmulwod_h_bu_b (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvmulwod_h_bu_b (_1, _2); ++} ++v4i64 ++__lasx_xvhaddw_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvhaddw_q_d (_1, _2); ++} ++v4u64 ++__lasx_xvhaddw_qu_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvhaddw_qu_du (_1, _2); ++} ++v4i64 ++__lasx_xvhsubw_q_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvhsubw_q_d (_1, _2); ++} ++v4u64 ++__lasx_xvhsubw_qu_du (v4u64 _1, v4u64 _2) ++{ ++ return __builtin_lasx_xvhsubw_qu_du (_1, _2); ++} ++v4i64 ++__lasx_xvmaddwev_q_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmaddwev_q_d (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwev_d_w (v4i64 _1, v8i32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmaddwev_d_w (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmaddwev_w_h (v8i32 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmaddwev_w_h (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmaddwev_h_b (v16i16 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmaddwev_h_b (_1, _2, _3); ++} ++v4u64 ++__lasx_xvmaddwev_q_du (v4u64 _1, v4u64 _2, v4u64 _3) ++{ ++ return __builtin_lasx_xvmaddwev_q_du (_1, _2, _3); ++} ++v4u64 ++__lasx_xvmaddwev_d_wu (v4u64 _1, v8u32 _2, v8u32 _3) ++{ ++ return __builtin_lasx_xvmaddwev_d_wu (_1, _2, _3); ++} ++v8u32 ++__lasx_xvmaddwev_w_hu (v8u32 _1, v16u16 _2, v16u16 _3) ++{ ++ return __builtin_lasx_xvmaddwev_w_hu (_1, _2, _3); ++} ++v16u16 ++__lasx_xvmaddwev_h_bu (v16u16 _1, v32u8 _2, v32u8 _3) ++{ ++ return __builtin_lasx_xvmaddwev_h_bu (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwod_q_d (v4i64 _1, v4i64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmaddwod_q_d (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwod_d_w (v4i64 _1, v8i32 _2, v8i32 _3) ++{ ++ return 
__builtin_lasx_xvmaddwod_d_w (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmaddwod_w_h (v8i32 _1, v16i16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmaddwod_w_h (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmaddwod_h_b (v16i16 _1, v32i8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmaddwod_h_b (_1, _2, _3); ++} ++v4u64 ++__lasx_xvmaddwod_q_du (v4u64 _1, v4u64 _2, v4u64 _3) ++{ ++ return __builtin_lasx_xvmaddwod_q_du (_1, _2, _3); ++} ++v4u64 ++__lasx_xvmaddwod_d_wu (v4u64 _1, v8u32 _2, v8u32 _3) ++{ ++ return __builtin_lasx_xvmaddwod_d_wu (_1, _2, _3); ++} ++v8u32 ++__lasx_xvmaddwod_w_hu (v8u32 _1, v16u16 _2, v16u16 _3) ++{ ++ return __builtin_lasx_xvmaddwod_w_hu (_1, _2, _3); ++} ++v16u16 ++__lasx_xvmaddwod_h_bu (v16u16 _1, v32u8 _2, v32u8 _3) ++{ ++ return __builtin_lasx_xvmaddwod_h_bu (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwev_q_du_d (v4i64 _1, v4u64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmaddwev_q_du_d (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwev_d_wu_w (v4i64 _1, v8u32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmaddwev_d_wu_w (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmaddwev_w_hu_h (v8i32 _1, v16u16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmaddwev_w_hu_h (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmaddwev_h_bu_b (v16i16 _1, v32u8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmaddwev_h_bu_b (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwod_q_du_d (v4i64 _1, v4u64 _2, v4i64 _3) ++{ ++ return __builtin_lasx_xvmaddwod_q_du_d (_1, _2, _3); ++} ++v4i64 ++__lasx_xvmaddwod_d_wu_w (v4i64 _1, v8u32 _2, v8i32 _3) ++{ ++ return __builtin_lasx_xvmaddwod_d_wu_w (_1, _2, _3); ++} ++v8i32 ++__lasx_xvmaddwod_w_hu_h (v8i32 _1, v16u16 _2, v16i16 _3) ++{ ++ return __builtin_lasx_xvmaddwod_w_hu_h (_1, _2, _3); ++} ++v16i16 ++__lasx_xvmaddwod_h_bu_b (v16i16 _1, v32u8 _2, v32i8 _3) ++{ ++ return __builtin_lasx_xvmaddwod_h_bu_b (_1, _2, _3); ++} ++v32i8 ++__lasx_xvrotr_b (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvrotr_b (_1, _2); ++} ++v16i16 ++__lasx_xvrotr_h (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvrotr_h (_1, _2); ++} ++v8i32 ++__lasx_xvrotr_w (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvrotr_w (_1, _2); ++} ++v4i64 ++__lasx_xvrotr_d (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvrotr_d (_1, _2); ++} ++v4i64 ++__lasx_xvadd_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvadd_q (_1, _2); ++} ++v4i64 ++__lasx_xvsub_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsub_q (_1, _2); ++} ++v4i64 ++__lasx_xvaddwev_q_du_d (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvaddwev_q_du_d (_1, _2); ++} ++v4i64 ++__lasx_xvaddwod_q_du_d (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvaddwod_q_du_d (_1, _2); ++} ++v4i64 ++__lasx_xvmulwev_q_du_d (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmulwev_q_du_d (_1, _2); ++} ++v4i64 ++__lasx_xvmulwod_q_du_d (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvmulwod_q_du_d (_1, _2); ++} ++v32i8 ++__lasx_xvmskgez_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmskgez_b (_1); ++} ++v32i8 ++__lasx_xvmsknz_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvmsknz_b (_1); ++} ++v16i16 ++__lasx_xvexth_h_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvexth_h_b (_1); ++} ++v8i32 ++__lasx_xvexth_w_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvexth_w_h (_1); ++} ++v4i64 ++__lasx_xvexth_d_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvexth_d_w (_1); ++} ++v4i64 ++__lasx_xvexth_q_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvexth_q_d (_1); ++} ++v16u16 ++__lasx_xvexth_hu_bu (v32u8 _1) ++{ ++ return __builtin_lasx_xvexth_hu_bu (_1); ++} ++v8u32 ++__lasx_xvexth_wu_hu (v16u16 
_1) ++{ ++ return __builtin_lasx_xvexth_wu_hu (_1); ++} ++v4u64 ++__lasx_xvexth_du_wu (v8u32 _1) ++{ ++ return __builtin_lasx_xvexth_du_wu (_1); ++} ++v4u64 ++__lasx_xvexth_qu_du (v4u64 _1) ++{ ++ return __builtin_lasx_xvexth_qu_du (_1); ++} ++v32i8 ++__lasx_xvrotri_b (v32i8 _1) ++{ ++ return __builtin_lasx_xvrotri_b (_1, 1); ++} ++v16i16 ++__lasx_xvrotri_h (v16i16 _1) ++{ ++ return __builtin_lasx_xvrotri_h (_1, 1); ++} ++v8i32 ++__lasx_xvrotri_w (v8i32 _1) ++{ ++ return __builtin_lasx_xvrotri_w (_1, 1); ++} ++v4i64 ++__lasx_xvrotri_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvrotri_d (_1, 1); ++} ++v4i64 ++__lasx_xvextl_q_d (v4i64 _1) ++{ ++ return __builtin_lasx_xvextl_q_d (_1); ++} ++v32i8 ++__lasx_xvsrlni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrlni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvsrlni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrlni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvsrlni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrlni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvsrlni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrlni_d_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvsrlrni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrlrni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvsrlrni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrlrni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvsrlrni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrlrni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvsrlrni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrlrni_d_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvssrlni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrlni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvssrlni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvssrlni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvssrlni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlni_d_q (_1, _2, 1); ++} ++v32u8 ++__lasx_xvssrlni_bu_h (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrlni_bu_h (_1, _2, 1); ++} ++v16u16 ++__lasx_xvssrlni_hu_w (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlni_hu_w (_1, _2, 1); ++} ++v8u32 ++__lasx_xvssrlni_wu_d (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlni_wu_d (_1, _2, 1); ++} ++v4u64 ++__lasx_xvssrlni_du_q (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlni_du_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvssrlrni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrlrni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvssrlrni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlrni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvssrlrni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlrni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvssrlrni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlrni_d_q (_1, _2, 1); ++} ++v32u8 ++__lasx_xvssrlrni_bu_h (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrlrni_bu_h (_1, _2, 1); ++} ++v16u16 ++__lasx_xvssrlrni_hu_w (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrlrni_hu_w (_1, _2, 1); ++} ++v8u32 ++__lasx_xvssrlrni_wu_d (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrlrni_wu_d (_1, _2, 1); ++} ++v4u64 ++__lasx_xvssrlrni_du_q (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrlrni_du_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvsrani_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrani_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvsrani_h_w (v16i16 _1, v16i16 _2) ++{ ++ return 
__builtin_lasx_xvsrani_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvsrani_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrani_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvsrani_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrani_d_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvsrarni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvsrarni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvsrarni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvsrarni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvsrarni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvsrarni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvsrarni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvsrarni_d_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvssrani_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrani_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvssrani_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrani_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvssrani_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrani_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvssrani_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrani_d_q (_1, _2, 1); ++} ++v32u8 ++__lasx_xvssrani_bu_h (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrani_bu_h (_1, _2, 1); ++} ++v16u16 ++__lasx_xvssrani_hu_w (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrani_hu_w (_1, _2, 1); ++} ++v8u32 ++__lasx_xvssrani_wu_d (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrani_wu_d (_1, _2, 1); ++} ++v4u64 ++__lasx_xvssrani_du_q (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrani_du_q (_1, _2, 1); ++} ++v32i8 ++__lasx_xvssrarni_b_h (v32i8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrarni_b_h (_1, _2, 1); ++} ++v16i16 ++__lasx_xvssrarni_h_w (v16i16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrarni_h_w (_1, _2, 1); ++} ++v8i32 ++__lasx_xvssrarni_w_d (v8i32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrarni_w_d (_1, _2, 1); ++} ++v4i64 ++__lasx_xvssrarni_d_q (v4i64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrarni_d_q (_1, _2, 1); ++} ++v32u8 ++__lasx_xvssrarni_bu_h (v32u8 _1, v32i8 _2) ++{ ++ return __builtin_lasx_xvssrarni_bu_h (_1, _2, 1); ++} ++v16u16 ++__lasx_xvssrarni_hu_w (v16u16 _1, v16i16 _2) ++{ ++ return __builtin_lasx_xvssrarni_hu_w (_1, _2, 1); ++} ++v8u32 ++__lasx_xvssrarni_wu_d (v8u32 _1, v8i32 _2) ++{ ++ return __builtin_lasx_xvssrarni_wu_d (_1, _2, 1); ++} ++v4u64 ++__lasx_xvssrarni_du_q (v4u64 _1, v4i64 _2) ++{ ++ return __builtin_lasx_xvssrarni_du_q (_1, _2, 1); ++} ++int ++__lasx_xbnz_b (v32u8 _1) ++{ ++ return __builtin_lasx_xbnz_b (_1); ++} ++int ++__lasx_xbnz_d (v4u64 _1) ++{ ++ return __builtin_lasx_xbnz_d (_1); ++} ++int ++__lasx_xbnz_h (v16u16 _1) ++{ ++ return __builtin_lasx_xbnz_h (_1); ++} ++int ++__lasx_xbnz_v (v32u8 _1) ++{ ++ return __builtin_lasx_xbnz_v (_1); ++} ++int ++__lasx_xbnz_w (v8u32 _1) ++{ ++ return __builtin_lasx_xbnz_w (_1); ++} ++int ++__lasx_xbz_b (v32u8 _1) ++{ ++ return __builtin_lasx_xbz_b (_1); ++} ++int ++__lasx_xbz_d (v4u64 _1) ++{ ++ return __builtin_lasx_xbz_d (_1); ++} ++int ++__lasx_xbz_h (v16u16 _1) ++{ ++ return __builtin_lasx_xbz_h (_1); ++} ++int ++__lasx_xbz_v (v32u8 _1) ++{ ++ return __builtin_lasx_xbz_v (_1); ++} ++int ++__lasx_xbz_w (v8u32 _1) ++{ ++ return __builtin_lasx_xbz_w (_1); ++} ++v4i64 ++__lasx_xvfcmp_caf_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_caf_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_caf_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_caf_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_ceq_d (v4f64 _1, v4f64 _2) 
++{ ++ return __builtin_lasx_xvfcmp_ceq_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_ceq_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_ceq_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cle_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cle_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cle_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cle_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_clt_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_clt_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_clt_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_clt_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cne_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cne_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cne_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cne_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cor_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cor_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cor_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cor_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cueq_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cueq_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cueq_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cueq_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cule_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cule_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cule_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cule_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cult_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cult_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cult_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cult_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cun_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cun_d (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_cune_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_cune_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cune_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cune_s (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_cun_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_cun_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_saf_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_saf_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_saf_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_saf_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_seq_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_seq_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_seq_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_seq_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sle_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sle_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sle_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sle_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_slt_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_slt_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_slt_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_slt_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sne_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sne_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sne_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sne_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sor_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sor_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sor_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sor_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sueq_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sueq_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sueq_s (v8f32 _1, v8f32 _2) ++{ 
++ return __builtin_lasx_xvfcmp_sueq_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sule_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sule_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sule_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sule_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sult_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sult_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sult_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sult_s (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sun_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sun_d (_1, _2); ++} ++v4i64 ++__lasx_xvfcmp_sune_d (v4f64 _1, v4f64 _2) ++{ ++ return __builtin_lasx_xvfcmp_sune_d (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sune_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sune_s (_1, _2); ++} ++v8i32 ++__lasx_xvfcmp_sun_s (v8f32 _1, v8f32 _2) ++{ ++ return __builtin_lasx_xvfcmp_sun_s (_1, _2); ++} ++v4f64 ++__lasx_xvpickve_d_f (v4f64 _1) ++{ ++ return __builtin_lasx_xvpickve_d_f (_1, 1); ++} ++v8f32 ++__lasx_xvpickve_w_f (v8f32 _1) ++{ ++ return __builtin_lasx_xvpickve_w_f (_1, 1); ++} ++v32i8 ++__lasx_xvrepli_b () ++{ ++ return __builtin_lasx_xvrepli_b (1); ++} ++v4i64 ++__lasx_xvrepli_d () ++{ ++ return __builtin_lasx_xvrepli_d (1); ++} ++v16i16 ++__lasx_xvrepli_h () ++{ ++ return __builtin_lasx_xvrepli_h (1); ++} ++v8i32 ++__lasx_xvrepli_w () ++{ ++ return __builtin_lasx_xvrepli_w (1); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-sad.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-sad.c +new file mode 100644 +index 000000000..6c0cdfd97 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-sad.c +@@ -0,0 +1,20 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++ ++#define N 1024 ++ ++#define TEST(SIGN) \ ++ SIGN char a_##SIGN[N], b_##SIGN[N]; \ ++ int f_##SIGN (void) \ ++ { \ ++ int i, sum = 0; \ ++ for (i = 0; i < N; i++) \ ++ sum += __builtin_abs (a_##SIGN[i] - b_##SIGN[i]);; \ ++ return sum; \ ++ } ++ ++TEST(signed); ++TEST(unsigned); ++ ++/* { dg-final { scan-assembler {\txvabsd.bu\t} } } */ ++/* { dg-final { scan-assembler {\txvabsd.b\t} } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c +new file mode 100644 +index 000000000..41fae32df +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x34598d0fd19314cb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1820939b2280fa86; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4a1c269b8e892a3a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x063f2bb758abc664; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffc0fcffffcf83; ++ *((unsigned long
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000288a00003c1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x3459730f2f6d1435; ++ *((unsigned long *)&__m256i_result[2]) = 0x19212d61237f2b03; ++ *((unsigned long *)&__m256i_result[1]) = 0x4a1c266572772a3a; ++ *((unsigned long *)&__m256i_result[0]) = 0x063f032d58557648; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007f017f01; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007f017f01; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001504f4c4b2361; ++ *((unsigned long *)&__m256i_result[2]) = 0x303338a48f374969; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001504f4c4b2361; ++ *((unsigned long *)&__m256i_result[0]) = 0x303338a48f374969; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x807c7fffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80817fff00810000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x807c7fffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80817fff00810000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x80767f0101050101; ++ *((unsigned long *)&__m256i_result[2]) = 0x80817f01007f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x80767f0101050101; ++ *((unsigned long *)&__m256i_result[0]) = 0x80817f01007f0000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x037fe01f001fe020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x037fe01f001fe020; ++ *((unsigned long *)&__m256i_result[3]) = 0x437f201f201f2020; ++ *((unsigned long *)&__m256i_result[2]) = 0x037f201f001f2020; ++ *((unsigned long *)&__m256i_result[1]) = 0x437f201f201f2020; ++ *((unsigned long *)&__m256i_result[0]) = 0x037f201f001f2020; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f60010000080100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f60010000080100; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010100020103; ++ *((unsigned long *)&__m256i_result[2]) = 0x040f040f040b236d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010100020103; ++ *((unsigned long *)&__m256i_result[0]) = 0x040f040f040b236d; ++ __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100010080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100010080; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000073333333; ++ __m256i_out 
= __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000040c100000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000040c100000101; ++ *((unsigned long *)&__m256i_result[3]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_result[2]) = 0x000040c100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_result[0]) = 0x000040c100000101; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01ffff4300fffeff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfe0000bcff000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01ffff4300fffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfe0000bcff000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x81ff00bd80ff0101; ++ *((unsigned long *)&__m256i_result[2]) = 0x01ff00bd00ff0101; ++ *((unsigned long *)&__m256i_result[1]) = 0x81ff00bd80ff0101; ++ *((unsigned long *)&__m256i_result[0]) = 0x01ff00bd00ff0101; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003fea00013feb; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe900014022; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003fea00013feb; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe900014022; ++ __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0cc08723ff900001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xcc9b89f2f6cef440; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0cc08723006fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x3364760e09310bc0; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000017f0000017f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000017f0000017f; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbf800000bf800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd662fa0000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbf800000bf800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd6ef750000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x417e01f040800000; ++ *((unsigned long *)&__m256i_result[2]) = 0x299d060000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x417e01f040800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x29108b0000000000; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001700170017; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100010485048a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0005ff870005ff86; ++ *((unsigned long *)&__m256i_result[1]) = 0x000100010485048a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0005ff870005ff86; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400100004001; ++ __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000;
++ __m256i_out = __lasx_xvabsd_d (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++ __m256i_out = __lasx_xvabsd_d (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
+new file mode 100644
+index 000000000..bd7a9069d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x2020000020200000;
++ *((unsigned long *)&__m256i_op0[2]) =
0x2020000020200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000001010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000100010485048a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0005ff870005ff86; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000100010485048a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0005ff870005ff86; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffa0065fffa0066; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffa0065fffa0066; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x400040003abf4000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x400040003abf4000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff80800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x04080408fff87803; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0001; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff5fff7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff5fff7; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001400000014; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff874dc687870000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_result[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_result[0]) = 0x80000000ff7f0001; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe01fe01fe; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000010000080040; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_wu 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3f0000400d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f0000400d; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff88; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe98; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f7fff; ++ __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
+new file mode 100644
+index 000000000..293295723
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
+@@ -0,0 +1,725 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x41cfe01dde000000; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; 
++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f8000007f7fffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f8000007f7fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f8000007f7fffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f8000007f7fffff; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff900000800; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_result[2]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_result[0]) = 0x00e9a80014ff0000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000956a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000956a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xb500000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb500000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007fffffffff9569; ++ *((unsigned long *)&__m256i_result[2]) = 0xb50000004efffe00; ++ *((unsigned long *)&__m256i_result[1]) = 0x007fffffffff9569; ++ *((unsigned long *)&__m256i_result[0]) = 0xb50000004efffe00; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x06f880008000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000010180000101; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfa08800080000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x800080008000480f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001010000010100; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0101000000010100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000000010100; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffff605a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffff605a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffff605a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffff605a; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_result[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_result[1]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_result[0]) = 0x55555555aaaaaaac; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000807e7ffe; ++ __m256i_out = 
__lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[0]) = 0xc2c2c2c2c2c2c2c2; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000006040190d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000006040190d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0005; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000c0000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020001f001f001e; ++ *((unsigned long *)&__m256i_result[2]) = 0x001f001fc01f001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020001f001f001e; ++ *((unsigned long *)&__m256i_result[0]) = 0x001f001f401f001f; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7ffeffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7ffeffffffff; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000080040; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffeffff0000; ++ __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c +new file mode 100644 +index 000000000..d6b57d1cd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c +@@ -0,0 +1,785 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101008000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101008000000080; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000402000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000402000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000402000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000402000000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffeffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100010102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefefefefefefefef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefefefefefefefef; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefefefefefefef6e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xeeeeeeeeeeeeeeee; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010101012; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101012; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101093; ++ *((unsigned long *)&__m256i_result[0]) = 0x1111111111111113; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0110000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0110000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0110000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0110000000000080; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1000000000000000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xce7ffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x6300000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xce7ffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x327f010101010102; ++ *((unsigned long *)&__m256i_result[2]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x327f010101010102; ++ *((unsigned long *)&__m256i_result[0]) = 0x6300000000000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff5556aaaa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff5556aaaa; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00020000aaa95556; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00020000aaa95556; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9a7f997fff01ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbe632a4f1c3c5653; ++ *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x419cd5b11c3c5654; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x017e01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0586060601fe0202; ++ *((unsigned long *)&__m256i_op0[1]) = 0x017e01fe01fe0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0586060601fe0004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100001; ++ *((unsigned long *)&__m256i_result[3]) = 0x017f01fe01ff01fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x05960616020e0203; ++ *((unsigned long *)&__m256i_result[1]) = 0x017f01fe01ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x05960616020e0005; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010146; ++ *((unsigned long *)&__m256i_result[2]) = 0x01010101010e0106; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010146; ++ *((unsigned long *)&__m256i_result[0]) = 0x01010101010e0106; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0010000000100000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000000100000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010486048c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000006; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x000000010486048c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000006; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00010001000c4411; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100044411; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000018; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_op1[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43ec0a1b2aba7ed0; ++ *((unsigned long *)&__m256i_result[3]) = 0x223d771060c77e19; ++ *((unsigned long *)&__m256i_result[2]) = 0x3870caad013e76b9; ++ *((unsigned long *)&__m256i_result[1]) = 0x223d771060c81cc7; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ec0a3b2aba7ee9; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2535253514141414; ++ *((unsigned long *)&__m256i_result[2]) = 0x2535253500002535; ++ *((unsigned long *)&__m256i_result[1]) = 0x2535253514141414; ++ *((unsigned long *)&__m256i_result[0]) = 0x2535253500002535; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000f0000000f; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000504f00002361; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff8f81000040e4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000504f00002361; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff8f81000040e4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000007ff000007ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000007ff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000007ff000007ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000007ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000584e00002b60; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000787dffffbf1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000584e00002b60; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000787dffffbf1c; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010200000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fef010000010100; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fef010000010100; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fef010000010100; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fef010000010100; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40b2bf4d30313031; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40b2bf4d30313031; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x40b240b330313031; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff5d425d42; ++ *((unsigned long *)&__m256i_result[1]) = 0x40b240b330313031; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff5d425d42; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000100040; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000100080; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff896099cbdbfff1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc987ffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff896099cbdbfff1; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xc987ffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00769f673424000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x3678000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00769f673424000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x3678000100000001; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000500000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000700000032; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000500000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000700000032; ++ __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003feec0108022; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fe9c015802c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003feec0108022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fe9c015802c; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f124010c022; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f174015c02c; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f124010c022; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f174015c02c; ++ __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long *)&__m256i_result[2]) = 0x05ea05ea05ea05ec;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long *)&__m256i_result[0]) = 0x05ea05ea05ea05ec;
++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000010101010101;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000010101010101;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101;
++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000020202020202;
++ *((unsigned long *)&__m256i_result[2]) = 0x0101000000010000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000020202020202;
++ *((unsigned long *)&__m256i_result[0]) = 0x0101000000010000;
++ __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
+new file mode 100644
+index 000000000..054bf6e55
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
+@@ -0,0 +1,427 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x44bb2cd3a35c2fd0;
++ *((unsigned long *)&__m256i_op0[0]) = 0xca355ba46a95e31c;
++ *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1d1d1d1d1d;
++ *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1d1d1d1d1d;
++ *((unsigned long *)&__m256i_result[1]) = 0x61d849f0c0794ced;
++ *((unsigned long *)&__m256i_result[0]) = 0xe75278c187b20039;
++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1d);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffbf7f7fff;
++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe651bfff;
++ *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1d1d1d1d1d;
++ *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1ddd9d9d1d;
++ *((unsigned long *)&__m256i_result[1]) = 0x1d1d1d1d1d1d1d1d;
++ *((unsigned long *)&__m256i_result[0]) = 0x1d1d1d1d046fdd1d;
++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1e);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_result[2]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_result[1]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_result[0]) = 0x1515151515151515; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1818181818181818; ++ *((unsigned long *)&__m256i_result[2]) = 0x1818181818181818; ++ *((unsigned long *)&__m256i_result[1]) = 0x1818181818181818; ++ *((unsigned long *)&__m256i_result[0]) = 0x1818181818181818; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202810102020202; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[2]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[1]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[0]) = 0x0909090909090909; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ffce20; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ffce20; ++ *((unsigned long *)&__m256i_result[3]) = 0x1514151415141514; ++ *((unsigned long *)&__m256i_result[2]) = 0x151415141514e335; ++ *((unsigned long *)&__m256i_result[1]) = 0x1514151415141514; ++ *((unsigned long *)&__m256i_result[0]) = 0x151415141514e335; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_result[2]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_result[1]) = 0x0606060606060606; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0606060606060606; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1212121212121212; ++ *((unsigned long *)&__m256i_result[2]) = 0x1212121212121212; ++ *((unsigned long *)&__m256i_result[1]) = 0x1212121212121212; ++ *((unsigned long *)&__m256i_result[0]) = 0x1212121212121212; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_result[3]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_result[2]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_result[1]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_result[0]) = 0x1a1a1a2c1a1a1a2c; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long *)&__m256i_result[1]) = 0x1d1d1d1e1d1d1d1e; ++ *((unsigned long *)&__m256i_result[0]) = 0x1d1d1d1e1d1d1d1e; ++ __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[2]) = 0x5982000200020002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[0]) = 0x5982000200020002; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[2]) = 
0x001f001f02c442af; ++ *((unsigned long *)&__m256i_result[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x001f001f02c442af; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80938013800d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x8091811081118110; ++ *((unsigned long *)&__m256i_result[2]) = 0x80a6802680208015; ++ *((unsigned long *)&__m256i_result[1]) = 0x8091811081110013; ++ *((unsigned long *)&__m256i_result[0]) = 0x80a6802680200018; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000003f00390035; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8015003f0006001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000003f00390035; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8015003f0006001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256i_result[2]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256i_result[0]) = 0x8020004a0011002a; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_result[2]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_result[1]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_result[0]) = 0x0016001600160016; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_result[3]) = 0xa1bfa1bfa1bfa1bf; ++ *((unsigned long *)&__m256i_result[2]) = 0xa1bfa1bf5e7c5e7c; ++ *((unsigned long *)&__m256i_result[1]) = 0xa1bfa1bfa1bfa1bf; ++ *((unsigned long *)&__m256i_result[0]) = 0xa1bfa1bf5e7c5e7c; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000100080; ++ *((unsigned long *)&__m256i_result[3]) = 0x001a001a001a009a; ++ *((unsigned long *)&__m256i_result[2]) = 0x001a001a002a009a; ++ *((unsigned long *)&__m256i_result[1]) = 0x001a001a001a009a; ++ *((unsigned long *)&__m256i_result[0]) = 0x001a001a002a009a; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x001c001c001c001c; ++ *((unsigned long *)&__m256i_result[2]) = 0x001c001c001c001c; ++ *((unsigned long *)&__m256i_result[1]) = 0x001c001c001c001c; ++ *((unsigned long *)&__m256i_result[0]) = 0x001c001c001d001d; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x721e001e721e001e; ++ *((unsigned long *)&__m256i_result[2]) = 0x721e001e721e001e; ++ *((unsigned long *)&__m256i_result[1]) = 0x721e001e721e001e; ++ *((unsigned long *)&__m256i_result[0]) = 0x721e001e721e001e; ++ __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001900000019; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001a0000001a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001a0000001a; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001900000019; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001d0000001d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001d0000001d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001d0000001d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001d; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000700020004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000700020004; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008; ++ __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x8); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf259905a0c126604; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long *)&__m256i_result[3]) = 0x000019410000e6aa; ++ *((unsigned long *)&__m256i_result[2]) = 0xf259905a0c126614; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000883a00000f30; ++ *((unsigned long *)&__m256i_result[0]) = 0x6d3c2d3aa1c82957; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000d; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff0fff0ff01ff14; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff10003; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff0fff0ff01ff14; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff10003; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff47b4ffff5879; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0fffffff10000006; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0fffffff10000006;
++ __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x7);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
+new file mode 100644
+index 000000000..70f3bf783
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
+@@ -0,0 +1,740 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x98111cca98111cca;
++ *((unsigned long *)&__m256i_op0[2]) = 0x98111cca98111cca;
++ *((unsigned long *)&__m256i_op0[1]) = 0x98111cca98111cca;
++ *((unsigned long *)&__m256i_op0[0]) = 0x98111cca98111cca;
++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffeffffff88;
++ *((unsigned long *)&__m256i_op1[2]) = 0x61e0000000000001;
++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffeffffff88;
++ *((unsigned long *)&__m256i_op1[0]) = 0x61e0000000000001;
++ *((unsigned long *)&__m256i_result[3]) = 0x0010ffc80010ff52;
++ *((unsigned long *)&__m256i_result[2]) = 0xfff1ffca0011ffcb;
++ *((unsigned long *)&__m256i_result[1]) = 0x0010ffc80010ff52;
++ *((unsigned long *)&__m256i_result[0]) = 0xfff1ffca0011ffcb;
++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffe90ffffff80;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffe90ffffff80;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0xffffff90ffffff80;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0xffffff90ffffff80;
++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000023; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01c601c6fe3afe3a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01c601c6fe3afe3a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000011; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007d003e007d003e; ++ *((unsigned long *)&__m256i_result[2]) = 0x007d003effa80010; ++ *((unsigned long *)&__m256i_result[1]) = 0x007d003e007d003e; ++ *((unsigned long *)&__m256i_result[0]) = 0x007d003effa80010; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5464fbfc416b9f71; ++ *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d8264202b8ea3f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x84bd087966d4ace0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x26aa68b274dc1322; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffcd42ffffecc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000475ffff4c51; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000740dffffad17; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f4bffff7130; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000468600008078; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff328ffffe021; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[2]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[1]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op0[0]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[3]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[2]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[1]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[0]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000399400003994; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000616100004f61; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000504f00002361; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8f81000040e4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000504f00002361; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff8f81000040e4; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x40b240b330313031; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff5d425d42; ++ *((unsigned long *)&__m256i_op1[1]) = 0x40b240b330313031; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff5d425d42; ++ *((unsigned long *)&__m256i_result[3]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_result[1]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff0000739c; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fe000007fe0; ++ __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000012e2110; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000583800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000583800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000100000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007bbbbbbb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007bbbbbbb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000073333333; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f807f007e8080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f807f007e806f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f807f007e8080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f807f007e806f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007e8092; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007e8092; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000062d4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000006338; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff80000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe01fe01fc01fc01; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe01fe01fc01fc01; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fc03bbc; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1b9763952fc4c101; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe37affb42fc05f69; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x18b988e64facb558; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe5fb66c81da8e5bb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xe37affb42fc05f69; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x65fb66c81da8e5ba; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1010101010101012; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1010101010101012; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1010101010101093; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1111111111111113; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101110101011; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1111111211111112; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5980000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefe00000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff0127000c0010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff012700040010; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8001000180010000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8001000180010000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800200000002; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000020000000200; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c +new file mode 100644 +index 000000000..22528a14f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000000000000; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffc0003fffa0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fb010201f900ff; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000005554; ++ *((unsigned long *)&__m256i_op1[2]) = 0xaaaa0000aaacfffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000005554; ++ *((unsigned long *)&__m256i_op1[0]) = 0xaaaa0000aaacfffe; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000054; ++ *((unsigned long *)&__m256i_result[2]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_result[0]) = 0x00aa000000ac00fe; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x017f01fe01ff01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x05960616020e0203; ++ *((unsigned long *)&__m256i_op0[1]) = 0x017f01fe01ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x05960616020e0005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x017f01fe01ff01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x05960616020e0203; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017f01fe01ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x05960616020e0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe01fc01fe01fc; ++ *((unsigned long *)&__m256i_result[2]) = 0x012c002c001c0006; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe01fc01fe0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x012c002c001c000a; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0007000000fb00ef; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ea005600f90090; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007000000fb00ef; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ea005600f90090; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc03b1fc5e050; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6a9e3fa2603a2000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc03b1fc5e050; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6a9e3fa2603a2000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[2]) = 0x019d00a2003a0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[0]) = 0x019d00a2003a0000; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00e30064001a008f; ++ *((unsigned long *)&__m256i_result[2]) = 
0x00e3006300e30063; ++ *((unsigned long *)&__m256i_result[1]) = 0x00e30064001a008f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00e3006300e30063; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000013; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000a400ff004f; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002ffff00020002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f504f104f504f5; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002ffff00020002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f504f104f504f5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000200ff00020002; ++ *((unsigned long *)&__m256i_result[2]) = 0x00f500f100f500f5; ++ *((unsigned long *)&__m256i_result[1]) = 0x000200ff00020002; ++ *((unsigned long *)&__m256i_result[0]) = 0x00f500f100f500f5; ++ __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf259905a0c126604; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long *)&__m256i_op1[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op1[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000b6b60001979a; ++ *((unsigned long *)&__m256i_result[2]) = 0x00011591000125be; ++ *((unsigned long *)&__m256i_result[1]) = 
0x000093950000a915; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001201600004783; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffff6ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffff6ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff000000ff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff000000ff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffa80000ff31; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffc7f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffc000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffc7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffc000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000b0b100015d1e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001fffe0001bfff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000b0b100015d1e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001fffe0001bfff; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffc0ffc1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f00000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffc0ffc1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f00000000003f; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001fffe0001ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0001003e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001fffe0001ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0001003e; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020010101610000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0061200000610000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020010101610000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0061200000610000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000101000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00011fff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000101000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00011fff0000ffff; ++ __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000013ffffffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000013ffffebd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000013ffffffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000013ffffebd8; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffec; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffebd8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffec; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffebd8; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000c0007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000c0007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3abb3abbbabababa; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3abb3abbbabababa; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800080; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000babababa; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000008c0087; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000babababa; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000008c0087; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000a00000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000000a; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8060000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8060000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x805f0000ffffffff; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe010000fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe010000fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe01fe010000fd02; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe01fe010000fd02; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f807f80; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c +new file mode 100644 +index 000000000..38a0a53d7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_result[2]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_result[1]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_result[0]) = 0x0036003200360032; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100fe000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000100fe00010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x000100fe000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100fe00010001; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000bf6e0000c916; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000030000fff3; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000b004a00440040; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004a00000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004a0000002a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004a00000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004a0000002a; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001a001a001a009a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001a001a002a009a; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x001a001a001a009a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001a001a002a009a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001a000000da; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001a000000da; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001a000000da; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001a000000da; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffce; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001ce; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001ce; ++ __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff000000010000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x8011ffae800c000c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00baff050083ff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80b900b980380038; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0017ffa8008eff31; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff800c000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000084ff3c; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff80380038; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000008fff31; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001001f001e; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001001f001e; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100f000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100f000ff; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffc0; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x00000000fff0ffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff0ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff78ffc0; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff1cff1c; ++ __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1ffca0011feca; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1ffca0011feca; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000002; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000017fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000017fff; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c +new file mode 100644 +index 000000000..a4dc565e9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9240000000008025; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffff24affff8025; ++ *((unsigned long *)&__m256i_op0[1]) = 0xb2c0000000008006; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffb341ffff8006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9240000000008025; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffff24affff8025; ++ *((unsigned long *)&__m256i_op1[1]) = 0xb2c0000000008006; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffb341ffff8006; ++ *((unsigned long *)&__m256i_result[3]) = 0xff2400000000ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffeffe4fffeff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff6400000000ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeff66fffeff00; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0000fffe0002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe0000fffe0002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000fffeffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffeffff; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffeffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7ffeffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f007bfffffffb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f007bfffffffb; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0ffe000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fa0001fff808000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0ffe000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fa0001fff808000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f0000ffffff80; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f0000ffffff80; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004; ++ __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001fe; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000d24; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000d24; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4ffc3f7800000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3fc03f6400000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4ffc3f7800000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3fc03f6400000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_result[2]) = 0x000040c100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x000050fd00000101; ++ *((unsigned long *)&__m256i_result[0]) = 0x000040c100000101; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fff; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000400008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000400008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000800080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc9d8080067f50020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc70000020000c000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007ffffffff7ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x49d8080067f4f81f; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffebeb8; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffebeb8; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1111111111111111; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffeffffffdd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c +new file mode 100644 +index 000000000..a2fbe9ed0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff017e01fe; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00b7003600120000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00b7006200fc0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00b7004100190004; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000008e7c00; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000067751500; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000008e7c00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000067751500; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007a00f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff01640092; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007a00f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff01640092; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff000000ff0000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff008000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff008000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff008000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff008000000000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff008000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff008000ff0000; ++ __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000804000004141; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00017fff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007bbb0000f777; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003e6c0000cb7a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003e6c0000cb7a; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000755a0000d8f2; ++ *((unsigned long *)&__m256i_result[2]) = 0x000075740000fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000755a0000d8f2; ++ *((unsigned long *)&__m256i_result[0]) = 0x000075740000fffe; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffee; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffee; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x00009fff00002001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00009fff00002001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000001a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000001a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100010000; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00153f1594ea02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff15c1ea95ea02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000153f15; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff15c1ea; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100fe04ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100fe04ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00f9f9f900000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00f9f9f900000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000faf3f3f2; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000029170; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3f0c3f0c3f0c3f0; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xe6e8e6e8e6e8d719; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xe6e8e6e8e6e8d719; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c +new file mode 100644 +index 000000000..8c98fc4be +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000033e87ef1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000033007e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000021; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000020202020; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020002000400040; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffc0003fffc0; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00bb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0057; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00bb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0057; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000005060503; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000073737; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000050007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000039; ++ __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000007070707; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0102040000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000020100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0703020000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000707; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000070300000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000100640000ff92; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000100640000ff92; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007c0100007c01; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007c0100007c00; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007c0100007c01; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007c0100007c00; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffe0000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000048; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000048; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000010; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffe00009fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fffe00002001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffe00009fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffe00002001; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffe7fffeffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffd84900000849; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07fffc670800f086; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x311d9b643ec1fe01; ++ *((unsigned long *)&__m256i_op1[0]) = 0x344ade20fe00fd01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007f00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x311d73ad3ec2064a; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff80cbfffffdf8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000081500000104; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffa4fffffffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000700000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff80cbfffffdf8; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffa4fffffffd; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000008050501; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_op0[2]) = 0x019d00a20039fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_op0[0]) = 0x019d00a2003a0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe007a01c40110; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0020001d001f; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000fef0ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000fef0ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000400080ffc080; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f010000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f010000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c +new file mode 100644 +index 000000000..e485786dd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c +@@ -0,0 +1,155 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001c; ++ __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c +new file mode 100644 +index 000000000..26cddc53a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c +@@ -0,0 +1,196 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xe2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x001151510a431048; ++ *((unsigned long *)&__m256i_result[2]) = 0x5b0b08425b09011a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5b5b58595b031019; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x3f3f3f3900000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x3f3f3f3900000003; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[1]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[0]) = 0xbabababababababa; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xba); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000404040004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000404040004040; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x40); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff31; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5e5e5e5e5e5e5e1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5e5e5e5e5e5e5e10; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x5e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x86); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f70000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x7f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xa3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x98); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xd9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xcc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c +new file mode 100644 +index 000000000..bc3590c21 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c +@@ -0,0 +1,125 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf259905a09c23be0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3a89167aeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000090100008492; ++ *((unsigned long *)&__m256i_result[2]) = 0xf000104808420300; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000e20; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x04082d108006284b; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffdfffdfffdfffd; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xefdfefdfefdfefdf; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvandn_v 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c +new file mode 100644 +index 000000000..5ce31ebbd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c +@@ -0,0 +1,680 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000edff00fffd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fff10000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000cdff00fffd; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff320000ffff; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffe00f7ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff629d7; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe00f7ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff629d7; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_result[2]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_result[1]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0xd010101010103218; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400100013; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000400100014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400100013; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010002; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000800080008; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010101013; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101013; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000006170; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000006170; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000030b8; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000202; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000500040005; ++ __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op1[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0xc58a0a0a07070706; ++ *((unsigned long *)&__m256i_result[2]) = 0x006b60e4180b0023; ++ *((unsigned long *)&__m256i_result[1]) = 0x1b39153f334b966a; ++ *((unsigned long *)&__m256i_result[0]) = 0xf1d75d79efcac002; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff00007fff; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffff00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffff00000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fffffff; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0080808080808080; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0080808100808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0080808000808080; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_result[2]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_result[1]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_result[0]) = 0x0888888888888888; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2008000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f3673ece5bd7db1; ++ *((unsigned long *)&__m256i_result[1]) = 0x2008000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x1f3673ece5bd7db1; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400000003fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400000003fff; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000020202000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000020202000; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffe00fe00; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000001fe01dde; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffe00fe00; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000001fe01dde; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c +new file mode 100644 +index 000000000..d04e42753 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x61d849f0c0794ced; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe75278c187b20039; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf90c0c0c00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfbe0b80c960c96d0; ++ *((unsigned long *)&__m256i_result[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_result[2]) = 0x146014141414146e; ++ *((unsigned long *)&__m256i_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_result[0]) = 0xf19998668e5f4b84; ++ __m256i_out = __lasx_xvavg_bu 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff00fff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f007f78; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fefffeff02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00030006fa05f20e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00030081bd80f90e; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00010003fc827a86; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f7f7f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f017fc0ddbf7d86; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000101010001; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f00000000; ++ __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000007f00000022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000003f00000011; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fff; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffbfffafffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffbfffaffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01fc01fe01fc; ++ *((unsigned long *)&__m256i_op1[2]) = 0x012c002c001c0006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01fc01fe0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x012c002c001c000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_result[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_result[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x80938013800d0005; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fff7fffffc08008; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fff7fffffc08008; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x800000007fff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x80000000ff7f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x800000007fff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x80000000ff7f0000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff0000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fff00400011; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000008001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fff00400011; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0504840303028201; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0504840303028201; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002800000010; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40efffe000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40efffe000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x111ebb784f9c4100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c386546809f3b50; ++ *((unsigned long *)&__m256i_op1[1]) = 0x111ebb784f9bf1ac; ++ *((unsigned long *)&__m256i_op1[0]) = 0x21f6050d955d3f68; ++ *((unsigned long *)&__m256i_result[3]) = 0x088f5dbc27ce2080; ++ *((unsigned long *)&__m256i_result[2]) = 0x161c32a2c04f9da7; ++ *((unsigned long *)&__m256i_result[1]) = 0x088f5dbc27cdf8d6; ++ *((unsigned long *)&__m256i_result[0]) = 0x10fb02864aae9fb4; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe0000fffe0012; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000800080008000; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003ddd80007bbb; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007878; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007878; ++ __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001e00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000f00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
+new file mode 100644
+index 000000000..37b78aa1b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
+@@ -0,0 +1,770 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op0[1]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x2b2b2b2b1bd5d5d6;
++  *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2af2d5d5d6;
++
*((unsigned long *)&__m256i_result[1]) = 0x2b2b2b2b1bd5d5d6; ++ *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2af2d5d5d6; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c0; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000200020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000200020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000200020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000200020; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[3]) = 0x40f23232330df9c8; ++ *((unsigned long *)&__m256i_result[2]) = 0x40f2323240f23232; ++ *((unsigned long *)&__m256i_result[1]) = 0x40f23232330df9c8; ++ *((unsigned long *)&__m256i_result[0]) = 0x40f2323240f23232; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100c00000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0ff000000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f00f000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0ff000000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f00f000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00f8000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x000800f800000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f8000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x000800f800000000; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000005060503; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000073737; ++ __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xdff8000000000000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff7f0000ff7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff7f0000ff7f; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007f0000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_result[2]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_result[1]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_result[0]) = 0xdfc2df80df80df87; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_result[2]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_result[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_result[0]) = 0xdfc2ff20df80ffa7; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_result[2]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_result[0]) = 0x21f8c3c4c0000005; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_result[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_result[0]) = 0x8848c848c848c848; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000080c000c080; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fffffff3fffc000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fffffff3fffc000; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x003fffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffce; ++ __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000808081; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000808081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000808081; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000808081; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000078c0c0008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000078c0c0008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1086658a18ba3594; ++ *((unsigned long *)&__m256i_op1[2]) = 0x160fe9f000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1086658a18ba3594; ++ *((unsigned long *)&__m256i_op1[0]) = 0x160fe9f000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x07a232640bfc1a73; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a66f497ff9effa9; ++ *((unsigned long *)&__m256i_result[1]) = 0x07a232640bfc1a73; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a66f497ff9effa9; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff6a9d8; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007ffe7ffe; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000807e7ffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000807e7ffe; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f8080007f007f; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff8000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000f00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000004a00000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004a0000002a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000004a00000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004a0000002a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fffffffefffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fffffffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002500000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x00008024ffff8014; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc0002500000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x00008024ffff8014; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000023a20000a121; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000179e0000951d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000023a20000a121; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000179e0000951d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000125100005111; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000c4f00004b0f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000125100005111; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000c4f00004b0f; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000080008001; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffa2beb040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffa2beb040; ++ __m256i_out 
= __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000005858585a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000005858585a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000005858585a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000005858585a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000023a300003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000023a300003fef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000023a300003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000023a300003fef; ++ *((unsigned long *)&__m256i_result[3]) = 0x000011d1ac2c4c2d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000011d1ac2c4c25; ++ *((unsigned long *)&__m256i_result[1]) = 0x000011d1ac2c4c2d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000011d1ac2c4c25; ++ __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c +new file mode 100644 +index 000000000..3944a6ac0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000018803100188; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000018803100188; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000437f0000201f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000201f00002020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000437f0000201f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000201f00002020; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x90007fff90008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0ffffffe90008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x87ffffff87ffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xc880bfffc880c080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x87ffffffc880c080; ++ __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000082a54290; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000028aa700; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000082a54290; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54287; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f00f841532148; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001a753c3; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f00f841532148; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001b52187; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004444; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007bbb0000f777; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003dde00007bbc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003dde00007bbc; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_result[1]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fc03f803fc040c0; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0283038402020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0282038402020202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0283038402020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0282038402020202; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x101010100000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0889088908810881; ++ *((unsigned long *)&__m256i_result[2]) = 0x0081010000810100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0889088900810088; ++ *((unsigned long *)&__m256i_result[0]) = 0x0081010000810100; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000001d001d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3e00000440004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3e000004400f400f; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3abb3abbbabababa; ++ *((unsigned long *)&__m256i_result[2]) = 0x0080000000800080; ++ *((unsigned long *)&__m256i_result[1]) = 0x3abb3abbbabababa; ++ *((unsigned long *)&__m256i_result[0]) = 0x0080000000800080; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc0008000c0008000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op1[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5890a0a07070707; ++ *((unsigned long *)&__m256i_result[2]) = 0x006be0e4180b8024; ++ *((unsigned long *)&__m256i_result[1]) = 0x1b399540334c966c; ++ *((unsigned long *)&__m256i_result[0]) = 0x71d7dd7aefcac001; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000808080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_result[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_result[0]) = 0x000a800b000a800b; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00f9f9f900000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00f9f9f900000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007cfcfd80000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007cfcfd80000001; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffc0017fffc001; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffc0017fffc001; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000501e99b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000109973de7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001020f22; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001890b7a39; ++ *((unsigned long *)&__m256i_result[3]) = 0x1b974ebaf6d64d4e; ++ *((unsigned long *)&__m256i_result[2]) = 0x62e0429c1b48fed1; ++ *((unsigned long *)&__m256i_result[1]) = 0x18b985adf63f548c; ++ *((unsigned long *)&__m256i_result[0]) = 0x032c796ecbdecc3b; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3a2a3a2a3a2a3a2a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3a2a3a2a3aaa45aa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3a553f7f7a2a3a2a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3a2a3a2a3aaa45aa; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x1d949d949d949d95; ++ *((unsigned long *)&__m256i_result[2]) = 0x1d949d949e1423d4; ++ *((unsigned long *)&__m256i_result[1]) = 0x1de9a03f3dd41d95; ++ *((unsigned long *)&__m256i_result[0]) = 0x1d949d949e1423d4; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001e001ea1bfa1bf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001e001e83e5422e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001e001ea1bfa1bf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x011f011f0244420e; ++ *((unsigned long *)&__m256i_result[3]) = 0x000f000fd0dfd0df; ++ *((unsigned long *)&__m256i_result[2]) = 0x000f000f83ef4b4a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000f000fd0dfd0df; ++ *((unsigned long *)&__m256i_result[0]) = 0x0110011001224b07; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000030000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000030000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000018002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000018002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43ec0a1b2aba7ed0; ++ *((unsigned long *)&__m256i_result[3]) = 0x111ebb784f9c4100; ++ *((unsigned long *)&__m256i_result[2]) = 0x1c386546809f3b50; ++ *((unsigned long *)&__m256i_result[1]) = 0x111ebb784f9bf1ac; ++ *((unsigned long *)&__m256i_result[0]) = 0x21f6050d955d3f68; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000840100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbffebffec0fe0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000840100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbffebffec0fe0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000420080000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x5fff5fff607f0000; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffa2beb040; ++ __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c +new file mode 100644 +index 000000000..def7b588e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c +@@ -0,0 +1,635 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000105fffffefb; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff02000000fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000105fffffefb; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff02000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_result[2]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_result[0]) = 0xbffffffffffffeff; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff7fffdefd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002555400000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002555400000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000002a542a; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000fff00004542; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00fe00feff02fe; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00fe00feff027f; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00fe00feff02fe; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff027f; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000023a20000a121; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000179e0000951d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000023a20000a121; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000179e0000951d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000165e0000480d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000165e0000480d; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fee; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fefe7f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fefe7f00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffe00000000; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f80007fa3; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f670000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f80007fa3; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f670000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xfffeffff10000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ffffffffffffffe; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3e8000003e800000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3e8000003e800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3e8000003e800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3e8000003e800000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001ef8d8d8c000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001ef8d8d80000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001ef8d8d8c000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001ef8d8d80000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffe0000000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffe0000000c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefee00000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fff000000000; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ffff88ff88; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff007f007f00; ++ __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffeffffff00; ++ __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c +new file mode 100644 +index 000000000..713eb19d5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, 
__m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df8d7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffbe8b470f; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010ffc80010ff52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff1ffca0011ffcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010bfc80010bf52; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1bfca0011bfcb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010bfc80010bf52; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1bfca0011bfcb; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000005136aaaaa8; ++ *((unsigned long *)&__m256i_result[2]) = 0x55515551aaaaaaa8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000005136aaaaa8; ++ *((unsigned long *)&__m256i_result[0]) = 0x55515551aaaaaaa8; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fdf000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fdf000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fdf7fff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fdf7fff00000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fd0000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7e7f7e7f7e7f7e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f7e7f7e; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7e7f7e7f7e0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007e7f7e; ++ __m256i_out = __lasx_xvbitclri_b 
(__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x24); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xcf01010143a10101; ++ *((unsigned long *)&__m256i_result[2]) = 0x4b6f01ef4b6f00ef; ++ *((unsigned long *)&__m256i_result[1]) = 0xcf01010143a10101; ++ *((unsigned long *)&__m256i_result[0]) = 0x4b6f01ef4b6f00ef; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdfffffffdfffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xdfffffffdfffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff7fff7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff7f027f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff7f0100; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00fe7f027f; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_result[3]) = 0x8011ffae800c000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x00baff050083ff3c; ++ *((unsigned long *)&__m256i_result[1]) = 0x80b900b980380038; ++ *((unsigned long *)&__m256i_result[0]) = 0x0017ffa8008eff31; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_op0[2]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_op0[0]) = 0x232221201f1e1d1c; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_result[2]) = 0x23222120171e151c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_result[0]) = 0x23222120171e151c; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fefe0000fefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fefe0000fefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00fe00fe00fe00fe; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x26); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffb; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffb; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xefefefefefee00aa; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xefefefefefee00aa; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_result[1]) = 
0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000f788f788; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe; ++ __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c +new file mode 100644 +index 000000000..2b0e7f8d1 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0501030102141923; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffd5020738b43ddb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x010200023b8e4174; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff4ff4e11410b40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fa022a01a401e5; ++ *((unsigned long *)&__m256i_op1[2]) = 0x030d03aa0079029b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x024c01f901950261; ++ *((unsigned long *)&__m256i_op1[0]) = 0x008102c2008a029f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101070102041903; ++ *((unsigned long *)&__m256i_result[2]) = 0xdfd506073ab435db; ++ *((unsigned long *)&__m256i_result[1]) = 0x110202023bae4176; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff6ff4a15418b40; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0edf8d7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8bc70f; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe0edf8d7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8bc70f; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe06df8d7; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffbe8b470f; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000101001e18; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1d1a1b181d1a1b18; ++ *((unsigned long *)&__m256i_result[2]) = 0x9c9b9a999c9b9a99; ++ *((unsigned long *)&__m256i_result[1]) = 0x1d1a1b181d1a1b18; ++ *((unsigned long *)&__m256i_result[0]) = 0x9c9b9a999c9b9a99; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000033e87ef1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x80008000b3e8fef1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x80008000802ea100; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0200000200000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0200000200000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff00ff00ffff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000180000000; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x8001800180018001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x8001800180018001; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000200000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010002; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f6f7f7f7f6; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f6f7f7f7f6; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f6f7f7f7f6; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f6f7f7f7f6; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7eeefefefefefefe; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x7eeefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000fffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000fffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000fffe; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000008000b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000008000b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000008000a; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000008000a; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000a; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000100010001fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100010001fffe; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x40fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x40fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x40fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x40fe00fe00fe00fe; ++ 
__m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc0007ffe0002; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000400000018002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffc0007ffe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000400000018002; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefe01010101; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefe01010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000400008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000400008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x010101010101016c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101410128; ++ *((unsigned long *)&__m256i_result[1]) = 0x010101010101016c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101410128; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x800000ff000000ff; ++ __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefe7f; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefe7f; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100018080; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c +new file mode 100644 +index 000000000..2b8327d91 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c +@@ -0,0 +1,317 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff00ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff00ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x01010101fe01fe01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x01010101fe01fe01; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2000200020002000; ++ __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7ff77fff7ff7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7ff77fff7ff7; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020001; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010121011; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000020000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000020000000000; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x29); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001c4e8ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001c4e8ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0081c4e8ff7fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0081c4e8ff7fffff; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f017ffd; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x7); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002080100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002080100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000a080100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000a080100; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100010001000100; ++ __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[3]) = 0xfee1057c01e10581; ++ *((unsigned long *)&__m256i_result[2]) = 0x011ec1210161057b; ++ *((unsigned long *)&__m256i_result[1]) = 0xfee1057c01e10581; ++ *((unsigned long *)&__m256i_result[0]) = 0x011ec1210161057b; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long *)&__m256i_result[2]) = 0xe27fe2821d226278; ++ *((unsigned long *)&__m256i_result[1]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long *)&__m256i_result[0]) = 0xe27fe2821d226278; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[3]) = 0x080808000828082f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080008280820; ++ *((unsigned long *)&__m256i_result[1]) = 0x080808000828082f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080008280820; ++ __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000800000000000; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x02000000fdffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x02000000fdffffff; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffed; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffeffed; ++ __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_result[2]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_result[0]) = 0xc03b000200020002; ++ __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff81007fff0100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff81007fff0100; ++ __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c +new file mode 100644 +index 000000000..c9847a615 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c +@@ -0,0 +1,134 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001f001f02c442af; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001f001f02c442af; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000c40086; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbe21000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000505300000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbe21000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000505300000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00005053000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00005053000000ff; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000040000; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c +new file mode 100644 +index 000000000..1edb4fca2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c +@@ -0,0 +1,185 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xef); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xcd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffd10000006459; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000441000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000040400000104; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000007fff01ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdb8e209d0cce025a; ++ *((unsigned long *)&__m256i_result[3]) = 0x88888a6d0962002e; ++ *((unsigned long *)&__m256i_result[2]) = 0xdb8a3109fe0f0020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007fff01fffb; ++ *((unsigned long *)&__m256i_result[0]) = 0xdb8e20990cce025a; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x88); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000002b902b3e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000002b902b3e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000002a102a3a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000002a102a3a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xd9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000090909090; ++ *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000090909090; ++ *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x95); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_result[2]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_result[1]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_result[0]) = 0x4545454545454545; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x4d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x21bb481000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01bf481000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x21bb481000ff0000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x01bf481000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xb1b3b1b1b1b7b1b1; ++ *((unsigned long *)&__m256i_result[2]) = 0xb1b7b1b1b1b1b1b1; ++ *((unsigned long *)&__m256i_result[1]) = 0xb1b3b1b1b1b7b1b1; ++ *((unsigned long *)&__m256i_result[0]) = 0xb1b7b1b1b1b1b1b1; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xb7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc03fc03fc03fc03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc03fc03fc03fc03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000002d; ++ *((unsigned long *)&__m256i_result[2]) = 0xc02dc02dc02dc02d; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000002d; ++ *((unsigned long *)&__m256i_result[0]) = 0xc02dc02dc02dc02d; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xed); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x60600000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x6060000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x60600000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x6060000000000000; ++ __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x60); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c +new file mode 100644 +index 000000000..c195cd91c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff000000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000095120000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc9da000063f50000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffdffffffc81aca; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff3a0b9512; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x280bc9db313a63f5; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe032c738adcb6bbb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800001010400; ++ *((unsigned long *)&__m256i_result[2]) = 0x000180009d120004; ++ *((unsigned long *)&__m256i_result[1]) = 0xc9da080067f50020; ++ *((unsigned long *)&__m256i_result[0]) = 0xc73c7fff6bbfffff; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffff8046867f79; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00010001; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x010180068080fff9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x3ff1808001020101; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0800000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010103; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000040000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000010000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000040000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000040000010; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbea2e127c046721f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1729c073816edebe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xde91f010000006f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000060000108; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001060005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fef0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xbfa3e127c147721f; ++ *((unsigned long *)&__m256i_result[2]) = 0x1729c173836edfbe; ++ 
*((unsigned long *)&__m256i_result[1]) = 0xdf91f111808007fb; ++ *((unsigned long *)&__m256i_result[0]) = 0x5ff1f90ffffbf30f; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_result[2]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_result[1]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_result[0]) = 0xe161616161614f61; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01010101010000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808280808082; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808280808082; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808280808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808280808082; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000082f8989a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000d58f43c8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010183f9999b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x01010101d58f43c9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ffe7ffd7ffe7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ffe7ffd7ffe8001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707feb70707b7d1; ++ *((unsigned long *)&__m256i_result[2]) = 0x65baa7efea95a985; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707feb70707b7d1; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x65baa7ef6a95a987; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x41cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x41cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7b7b7b7b80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xcacacb1011040500; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7b7b7b7b80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xcacacb1011040500; ++ *((unsigned long *)&__m256i_result[3]) = 0x49cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff4080102102001; ++ *((unsigned long *)&__m256i_result[1]) = 0x49cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff4080102102001; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010401; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010401; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010401; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010401; ++ __m256i_out = __lasx_xvbitset_b 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_result[2]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_result[0]) = 0x5b7f01ff5b7f10ff; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000080000001000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000080000001000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_op0[2]) = 0x23222120171e151c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_op0[0]) = 0x23222120171e151c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010127272525; ++ *((unsigned long *)&__m256i_result[2]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010127272525; ++ *((unsigned long *)&__m256i_result[0]) = 0x23a2a121179e951d; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x800080008000ffee; ++ *((unsigned long *)&__m256i_result[2]) = 0x800080008000ffee; ++ *((unsigned long *)&__m256i_result[1]) = 0x800080008000ffee; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080008000ffee; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100010001ffff; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00010000fffe0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00010000fffe0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00010000fffe0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00010000fffe0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x01010101010101c9; ++ __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000affff800b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000affff800b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000affff800b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000affff800b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000800; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000400010004; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000f0001000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000f0001000d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000f0001000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000f0001000d; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f010000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f010000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f010100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f010100000101; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x498100814843ffe1; ++ *((unsigned long *)&__m256i_result[2]) = 0x4981008168410001; ++ *((unsigned long *)&__m256i_result[1]) = 0x498100814843ffe1; ++ *((unsigned long *)&__m256i_result[0]) = 0x4981008168410001; ++ __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100002000; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op0[2]) = 0x03acfc5303260e80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op0[0]) = 0x03acfc5303260e80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_result[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_result[2]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_result[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_result[0]) = 0x03acfc5303260e81; ++ __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c +new file mode 100644 +index 000000000..47f37e4b3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c +@@ -0,0 +1,405 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007fff; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202020202020202; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000000; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000004000000; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001000000fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808180808093; ++ *((unsigned long *)&__m256i_result[2]) = 0x80808081808080fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808180808093; ++ *((unsigned long *)&__m256i_result[0]) = 0x80808081808080fb; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000000100000; ++ __m256i_out = 
__lasx_xvbitseti_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[3]) = 0x1000100054445443; ++ *((unsigned long *)&__m256i_result[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[1]) = 0x1000100054445443; ++ *((unsigned long *)&__m256i_result[0]) = 0x7bbbbbbbf7777778; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffa2078fffa2074; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffa2078fffa2074; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffeffebfb7afb62; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeffebfb7afb62; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020206431; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0043030300400300; ++ *((unsigned long *)&__m256i_result[2]) = 0x0043030300400300; ++ *((unsigned long *)&__m256i_result[1]) = 0x0043030300400100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0043030300400100; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ec0a1b2aba7ed0; ++ *((unsigned long *)&__m256i_result[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3870ca9d013e76b0; ++ *((unsigned long *)&__m256i_result[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ec0a1b2aba7ed0; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f780000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f80000fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f780000ff80; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f784000ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f8f7f84000fff9; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f784000ff80; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000008000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000008000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffefef800; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000000020000; ++ __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[2]) = 0x00020002000230ba; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[0]) = 0x00020002000230ba; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x8100810081008100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x8100810081008100; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000007878; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007878; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000107878; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000107878; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000400140004001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffff2f640006f48; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000400140004001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff2f640006f48; ++ __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[3]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[2]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[1]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[0]) = 0xfd12fd12fd12fd12; ++ __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x4); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
+new file mode 100644
+index 000000000..3c1a8b8e6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
+@@ -0,0 +1,130 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x555555553f800000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x555555553f800000;
++ *((unsigned long *)&__m256i_result[3]) = 0x00555555553f8000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x00555555553f8000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x17);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1d);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1b);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000030000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000030000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1a);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) =
0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020643100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020643100000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_result[3]) = 0xa90896a400000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xa90896a400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003f003f003f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003f003f003f00; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvbsll_v 
(__m256i_op0, 0xd);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
+new file mode 100644
+index 000000000..340f7691b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
+@@ -0,0 +1,64 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000007d0d0d0;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000007d0d0d0;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x000007d0d0d00000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x000007d0d0d00000;
++ __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x6);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffffffe;
++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffffe;
++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffffffe;
++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffffe;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x01fffffffe000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x01fffffffe000000;
++ __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x15);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000018803100188;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000018803100188;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++ __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x15);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x1b);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
+new file mode 100644
+index 000000000..dbc52f92b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
+@@ -0,0 +1,449 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0xffd1b24e00000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcea54ffff29a8;
++ *((unsigned long *)&__m256i_op0[1]) = 0xff8cad88ff8306b4;
++ *((unsigned long *)&__m256i_op0[0]) = 0xfffc1278fffce4c8;
++ *((unsigned long *)&__m256i_result[3]) = 0x0802010000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0806030008080001;
++ *((unsigned long *)&__m256i_result[1]) = 0x0801010108010001;
++ *((unsigned long *)&__m256i_result[0]) = 0x0806000008060302;
++ __m256i_out = __lasx_xvclo_b (__m256i_op0);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_d (__m256i_op0);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffffffe;
++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffffe;
++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffffffe;
++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffffe;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++ __m256i_out = __lasx_xvclo_d (__m256i_op0);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0xfafafafafafafafa;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fefefe;
++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++ __m256i_out = __lasx_xvclo_h (__m256i_op0);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000808; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd04752cdd5543b56; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6906e68064f3d78b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd04752cdd5543b56; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6906e68064f3d78b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000300000002; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc0000000c0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc000000080400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc0000000c0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc000000080400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0002000000010000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010000100000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000004000000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000004000000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000100010; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclo_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c +new file mode 100644 +index 000000000..89191c467 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
+@@ -0,0 +1,504 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x04481940fbb7e6bf;
++ *((unsigned long *)&__m256i_op0[2]) = 0xf2781966e6991966;
++ *((unsigned long *)&__m256i_op0[1]) = 0x51258839aeda77c6;
++ *((unsigned long *)&__m256i_op0[0]) = 0xcf25f0e00f1ff0e0;
++ *((unsigned long *)&__m256i_result[3]) = 0x0501030100000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0001030100000301;
++ *((unsigned long *)&__m256i_result[1]) = 0x0102000200000100;
++ *((unsigned long *)&__m256i_result[0]) = 0x0002000004030000;
++ __m256i_out = __lasx_xvclz_b (__m256i_op0);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f;
++ __m256i_out = __lasx_xvclz_w (__m256i_op0);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000003868686a20;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0045b8ae81bce1d8;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000003868686a20;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0045b8ae81bce1d8;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000001a00000001;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000900000000;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000001a00000001;
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000900000000;
++ __m256i_out = __lasx_xvclz_w (__m256i_op0);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010;
++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010;
++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010;
++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010;
++ __m256i_out = __lasx_xvclz_h (__m256i_op0);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++ *((unsigned long *)&__m256i_result[2]) =
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080807; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080807; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100001; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000000080000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000000080000; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000018; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000019; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0b085bfc00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0b004bc000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0b085bfc00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0b004bc000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0408010008080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0408010008080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408010008080808; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0404010008080808; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x0408010008080808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_result[1]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_result[0]) = 0x0504070804040404; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0002ffff0000ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002ffff0000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000e; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000032; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000003c000000032; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[2]) = 0x001000100010000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[0]) = 0x001000060010000a; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000c; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000008080800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000008080800; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004001000100004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000400100010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004001000100004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400100010; ++ __m256i_out = __lasx_xvclz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvclz_b 
(__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000029; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000029; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvclz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000027; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvclz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvclz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c +new file mode 100644 +index 000000000..0d7c67703 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, 
unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00080000000cc916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000006fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00f8000000f41bfb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fa0106; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000fe000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000fe000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000fe000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000fe000000fe; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe8001b72e0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb72e8001b72eaf12; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe000247639d9c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb5308001b72eaf12; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffb5ff80ffd0ffd8; ++ __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8091811081118110; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80a6802680208015; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8091811081110013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80a6802680200018; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8091811081118110; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80a6802680208015; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8091811081110013; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80a6802680200018; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f831f80e0e09f86; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f831f80e0e09f86; ++ __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffa080000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffe080000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffa080000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffe080000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff81ffffff00; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005; 
++ *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000d000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000d000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000583800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000583800; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000100000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000007fef; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fef; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 
0x000000000000003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c +new file mode 100644 +index 000000000..fd8b6d38c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c +@@ -0,0 +1,500 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256i_op1[3]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0303030303020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0303030303020000; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100010001000100; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9fe7fffffffff32e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6040190ddfdd8587; ++ *((unsigned long *)&__m256i_op1[1]) = 0xecd011542d2cc4c7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6040190dffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7fff7f7f7fff7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7fff7f7f7fff7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7fff7f7f7fff7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7fff7f7f7fff7f; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010000; ++ __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe2e2e202ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe2e2e202ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00020001ffb6ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0049004200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffb7; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004c00000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000003fb000003fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000003fb000003fb; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff827f80; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000ffff0226823c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff827f80; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0226823c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007d003e007d003e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007d003effa80010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007d003e007d003e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007d003effa80010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008000000100; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000013b13380; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000013b13380; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000e2e20000e2e2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00011d1c00011d9c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000e2e20000e2e2; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00011d1c00011d9c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000e2e20000e2e2; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00011d1c00011d9c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000e2e20000e2e2; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00011d1c00011d9c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1b1a191817161514; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1b1a191817161514; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000101; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c +new file mode 100644 +index 000000000..94f31019c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2b2b2b2b1bd5d5d6; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2a2a2a2af2d5d5d6; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2b2b2b2b1bd5d5d6; ++ *((unsigned long *)&__m256i_op0[0]) 
= 0x2a2a2a2af2d5d5d6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002a0000002a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002a0000002a; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff2ffffffd5; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffd5ffffffd6; ++ __m256i_out = __lasx_vext2xv_w_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_w_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff0; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000017f; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff00fff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff00fffffff0; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe20; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001dfffffe1f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; 
++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000498000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x00004843ffffffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000498000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000684000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000017; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff82; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd100645944100004; ++ *((unsigned long *)&__m256i_op0[2]) = 0xd1908469108400d1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000404040104; ++ *((unsigned long *)&__m256i_op0[0]) = 0xd1108199714910f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004040104; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffd1108199; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000714910f9; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c +new file mode 100644 +index 000000000..d93201bc4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c +@@ -0,0 +1,669 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x639c3fffb5dffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb8c7800094400001; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0063009c003f00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00b500df00ff00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b800c700800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0094004000000001; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00aa00ab00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00aa00ab00ff00ff; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01ff01ff01c0003e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01ff01ff01c0003e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000100ff000100ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100c00000003e; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000f0001000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000f0001000d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000f0001000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000f0001000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000000d; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000005f000000f0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000f9; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000f3; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000029; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000029; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fd; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff7fff7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff7f027f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff7f0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00fe7f027f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000003fbfc04; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001fdfe02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000003fbfc04; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fdfe02; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ef; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002e0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002e0000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000002e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000002e; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000002e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fffe; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffc0000fee0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fe000000ffe0; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001b00fd0000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000001b; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001b; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000fd00000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000017f7f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000017f7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f00007f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff00000000ff; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000498000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x000048430000ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000498000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000684000000000; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00007edd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00007ed3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00007edf; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00007edf; ++ __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000801380f380fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000801380f300fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008013; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000080f3; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fb; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out 
= __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c +new file mode 100644 +index 000000000..9fb4e3ff0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[3]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[2]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[1]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[0]) = 0x005500550055ffab; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_h_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffec; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffebd8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffec; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffebd8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffec; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff1cffffff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff1cffffff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffff1cffffff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff1cffffff1c; ++ __m256i_out = __lasx_xvexth_w_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010101; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvexth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007f00340040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000007f000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00080000002c0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0008000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00080000002c0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0008000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00080000002c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00080000002c0000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c +new file mode 100644 +index 000000000..fe6ff15d8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c +@@ -0,0 +1,592 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004500f300fb; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x004100df00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00c000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x004100df00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00c000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f00ff007f00ff; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200010002; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffff8046867f79; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6651bfff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f3280000dfff; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fffe; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000104000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000104000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000165a; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001010600000106; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001010600000106; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007cfcfd80000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007cfcfd80000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000020ff790020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000020ff790020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03fe; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000001ff000003fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000001ff000003fe; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010100000101; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_du_wu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x36722a7e66972cd6; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff5f5c; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000005e; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvexth_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c +new file mode 100644 +index 000000000..c0d3e8e75 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c +@@ -0,0 +1,86 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, 
lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000201220001011c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000201220001011c; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_q_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c +new file mode 100644 +index 000000000..8c7ab4ed3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c +@@ -0,0 +1,163 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010100005400; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvextl_qu_du (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c +new file mode 100644 +index 000000000..8e61f1c6d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000020202; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002020202; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000020200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff49fe4200000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xbf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xfe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x9f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xc4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x99); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000fffffefc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000fffffffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000fffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000fffffffff; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x8f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614e60; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000061; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000061; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x83); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007bbbbbbb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007bbbbbbb; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x8d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xda); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x87); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xa5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2b2a292827262524; ++ *((unsigned long *)&__m256i_op0[2]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2b2a292827262524; ++ *((unsigned long *)&__m256i_op0[0]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_result[2]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000027262524; ++ *((unsigned long *)&__m256i_result[0]) = 0x232221201f1e1d1c; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0xbd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000080000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000080000000; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xb8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc6ffc6003a003a; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x54); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xe7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00010001000100; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x7b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x6f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xf6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 
0x7b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff00fff8ffc0; ++ __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x82); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002000000; ++ __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x43); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff007ffd61; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff007ffd61; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x62); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c +new file mode 100644 +index 000000000..657a19e58 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffbe8b470f; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fffffffffffffff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 
(__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_result[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256d_result[0]) = 0x00007fff00007fff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fffffffa2beb040; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000001c000000134; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000001c000000134; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000001c000000134; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000001c000000134; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000038000000268; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000038000000268; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fff7fff7fff7fff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000405; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000405; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000040; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256d_result[3]) = 0x00000000ff890000; ++ *((unsigned long *)&__m256d_result[2]) = 0x00000000ff790000; ++ *((unsigned long *)&__m256d_result[1]) = 0x00000000ff890000; ++ *((unsigned long *)&__m256d_result[0]) = 0x00000000ff790000; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000000000006d; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000010006d; ++ *((unsigned 
long *)&__m256d_op0[1]) = 0x000000000000006d; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256d_result[3]) = 0x00000000000000ad; ++ *((unsigned long *)&__m256d_result[2]) = 0x00000000001800ad; ++ *((unsigned long *)&__m256d_result[1]) = 0x00000000000000ad; ++ *((unsigned long *)&__m256d_result[0]) = 0x00000000001800ad; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x2020000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x2020000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fffffffffffffff; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff8000; ++ __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000400000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000400000001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000010100000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000010100000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00008000003f0000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00390015003529c1; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00008000003f0000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00390015003529c1; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x80007ffe81fdfe03; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x7f00d5007f00ffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x7f00ffffff00ffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x7f00d5007f00ffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x7f00ffffff00ffff; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffff00000002; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffff00000002; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x00ff00fe00ff00fe; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256d_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256d_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0002555400000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0002555400000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffe367cc82f8989a; ++ *((unsigned long *)&__m256d_op0[2]) = 0x4f90000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41cc5bb8a95fd1eb; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 
0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c +new file mode 100644 +index 000000000..4002c4074 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c +@@ -0,0 +1,911 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, 
__m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0x00000001; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000002; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x00000001; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000002; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0x00000001; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000002; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0x00000001; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000002; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 
0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x40b2bf4d; ++ *((int *)&__m256_op0[6]) = 0x30313031; ++ *((int *)&__m256_op0[5]) = 0x50005000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x40b2bf4d; ++ *((int *)&__m256_op0[2]) = 0x30313031; ++ *((int *)&__m256_op0[1]) = 0x50005000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x22be22be; ++ *((int *)&__m256_op1[5]) = 0x7fff7fff; ++ *((int *)&__m256_op1[4]) = 0xa2bea2be; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x22be22be; ++ *((int *)&__m256_op1[1]) = 0x7fff7fff; ++ *((int *)&__m256_op1[0]) = 0xa2bea2be; ++ *((int *)&__m256_result[7]) = 0x40b2bf4d; ++ *((int *)&__m256_result[6]) = 0x30313031; ++ *((int *)&__m256_result[5]) = 0x7fff7fff; ++ *((int *)&__m256_result[4]) = 0xa2bea2be; ++ *((int *)&__m256_result[3]) = 0x40b2bf4d; ++ *((int *)&__m256_result[2]) = 0x30313031; ++ *((int *)&__m256_result[1]) = 0x7fff7fff; ++ *((int *)&__m256_result[0]) = 0xa2bea2be; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00ff0000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00ff0000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00ff0000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00ff0000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ 
__m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000008c; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000008c; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0000008c; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x0000008c; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000118; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000118; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffff8000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffff8000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffff8000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffff8000; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff0101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffff0101; ++ *((int *)&__m256_result[4]) = 0x00000001; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffff0101; ++ *((int *)&__m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ 
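Every case in these generated correctness tests follows the same shape: the 256-bit operands and the expected result are filled lane by lane through pointer casts (the files are built with -mlasx -w -fno-strict-aliasing, so the type punning is deliberate), the intrinsic under test is called once, and ASSERTEQ_32 / ASSERTEQ_64 from ../simd_correctness_check.h compares the output bit pattern against the precomputed one, reporting the failing __LINE__. A minimal hand-written sketch of that shape follows; it is illustrative only and not part of the patch, it assumes a LoongArch64 target built with -mlasx, it assumes the LASX intrinsic header is <lasxintrin.h> (the header name is elided in the includes above), and it stands in for ASSERTEQ_32 with a plain memcmp-based assert.

#include <lasxintrin.h>  /* assumed header name for the __lasx_* intrinsics */
#include <string.h>
#include <assert.h>

int
main (void)
{
  __m256 op0, op1, out, expected;

  /* Fill every 32-bit lane with a fixed bit pattern, mirroring the
     pointer-cast stores used by the generated tests (build with
     -fno-strict-aliasing, as the testsuite does, when trying this).  */
  for (int i = 0; i < 8; i++)
    {
      ((unsigned int *) &op0)[i] = 0x00000000;      /* +0.0f           */
      ((unsigned int *) &op1)[i] = 0x3f800000;      /* 1.0f            */
      ((unsigned int *) &expected)[i] = 0x3f800000; /* 0.0f + 1.0f     */
    }

  /* Call the intrinsic under test.  */
  out = __lasx_xvfadd_s (op0, op1);

  /* Bitwise comparison of the whole vector, standing in for the
     testsuite's ASSERTEQ_32 macro.  */
  assert (memcmp (&out, &expected, sizeof (out)) == 0);

  return 0;
}

The generated files do the same thing with hand-picked edge-case bit patterns (NaNs, infinities, denormals, all-ones words) so that the vector result of each LASX floating-point instruction is checked bit-for-bit rather than numerically.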
++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x10101011; ++ *((int *)&__m256_op1[4]) = 0x10101011; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x11111112; ++ *((int *)&__m256_op1[0]) = 0x11111112; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00060000; ++ *((int *)&__m256_op0[6]) = 0x00040000; ++ *((int *)&__m256_op0[5]) = 
0x00020000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00060000; ++ *((int *)&__m256_op0[2]) = 0x00040000; ++ *((int *)&__m256_op0[1]) = 0x00020000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00060000; ++ *((int *)&__m256_op1[6]) = 0x00040000; ++ *((int *)&__m256_op1[5]) = 0x00020000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00060000; ++ *((int *)&__m256_op1[2]) = 0x00040000; ++ *((int *)&__m256_op1[1]) = 0x00020000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x000000ff; ++ *((int *)&__m256_op0[4]) = 0x000000ff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x000000ff; ++ *((int *)&__m256_op0[0]) = 0x000000ff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000101; ++ *((int *)&__m256_op1[4]) = 0x00000101; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000101; ++ *((int *)&__m256_op1[0]) = 0x00000101; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffff001f; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x007fe268; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffff001f; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x007fe268; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0xffff001f; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x007fe268; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xffff001f; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x007fe268; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xffff001f; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xffff001f; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 
0x00000000; ++ *((int *)&__m256_op1[7]) = 0x7f800000; ++ *((int *)&__m256_op1[6]) = 0x7f800000; ++ *((int *)&__m256_op1[5]) = 0x7f800000; ++ *((int *)&__m256_op1[4]) = 0x7f800000; ++ *((int *)&__m256_op1[3]) = 0x7f800000; ++ *((int *)&__m256_op1[2]) = 0x7f800000; ++ *((int *)&__m256_op1[1]) = 0x7f800000; ++ *((int *)&__m256_op1[0]) = 0x7f800000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x02a54290; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x02a54290; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x02a54290; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0154dc84; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x02a54290; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000089; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x82a54290; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x028aa700; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x82a54290; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x02a54287; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00004200; ++ *((int *)&__m256_op0[6]) = 0x80000000; ++ *((int *)&__m256_op0[5]) = 0x5fff5fff; ++ *((int *)&__m256_op0[4]) = 0x607f0000; ++ *((int *)&__m256_op0[3]) = 0x00004200; ++ *((int *)&__m256_op0[2]) = 0x80000000; ++ *((int *)&__m256_op0[1]) = 0x5fff5fff; ++ *((int *)&__m256_op0[0]) = 0x607f0000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int 
*)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00004200; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x5fff5fff; ++ *((int *)&__m256_result[4]) = 0x607f0000; ++ *((int *)&__m256_result[3]) = 0x00004200; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x5fff5fff; ++ *((int *)&__m256_result[0]) = 0x607f0000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 
0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00800080; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000202; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00800080; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000202; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00800080; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000202; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00800080; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000202; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffefffe; ++ *((int *)&__m256_op0[6]) = 0xfffefffe; ++ *((int *)&__m256_op0[5]) = 0xfffefffe; ++ *((int *)&__m256_op0[4]) = 0xfffefffe; ++ *((int *)&__m256_op0[3]) = 0xfffefffe; ++ *((int *)&__m256_op0[2]) = 0xfffefffe; ++ *((int *)&__m256_op0[1]) = 0xfffefffe; ++ *((int *)&__m256_op0[0]) = 0xfffefffe; ++ *((int *)&__m256_op1[7]) = 0x000023a3; ++ *((int *)&__m256_op1[6]) = 0x00003fff; ++ *((int *)&__m256_op1[5]) = 0x000023a3; ++ *((int *)&__m256_op1[4]) = 0x00003fef; ++ *((int *)&__m256_op1[3]) = 0x000023a3; ++ *((int *)&__m256_op1[2]) = 0x00003fff; ++ *((int *)&__m256_op1[1]) = 0x000023a3; ++ *((int *)&__m256_op1[0]) = 0x00003fef; ++ *((int *)&__m256_result[7]) = 0xfffefffe; ++ *((int *)&__m256_result[6]) = 0xfffefffe; ++ *((int *)&__m256_result[5]) = 0xfffefffe; ++ *((int *)&__m256_result[4]) = 0xfffefffe; ++ *((int *)&__m256_result[3]) = 0xfffefffe; ++ *((int *)&__m256_result[2]) = 0xfffefffe; ++ *((int *)&__m256_result[1]) = 0xfffefffe; ++ *((int *)&__m256_result[0]) = 0xfffefffe; ++ __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x002a542a; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int 
*)&__m256_op1[0]) = 0x002a542a; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7fc00000; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7fc00000; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00fe00fe; ++ *((int *)&__m256_op0[6]) = 0x00fe00fe; ++ *((int *)&__m256_op0[5]) = 0x00fe00fe; ++ *((int *)&__m256_op0[4]) = 0x00fe00fe; ++ *((int *)&__m256_op0[3]) = 0x00fe00fe; ++ *((int *)&__m256_op0[2]) = 0x00fe00fe; ++ *((int *)&__m256_op0[1]) = 0x00fe00fe; ++ *((int *)&__m256_op0[0]) = 0x00fe00fe; ++ *((int *)&__m256_op1[7]) = 0x00fe00fe; ++ *((int *)&__m256_op1[6]) = 0x00fe00fe; ++ *((int *)&__m256_op1[5]) = 0x00fe00fe; ++ *((int *)&__m256_op1[4]) = 0x00fe00fe; ++ *((int *)&__m256_op1[3]) = 0x00fe00fe; ++ *((int *)&__m256_op1[2]) = 0x00fe00fe; ++ *((int *)&__m256_op1[1]) = 0x00fe00fe; ++ *((int *)&__m256_op1[0]) = 0x00fe00fe; ++ *((int *)&__m256_result[7]) = 0x3f800000; ++ *((int *)&__m256_result[6]) = 0x3f800000; ++ *((int *)&__m256_result[5]) = 0x3f800000; ++ *((int *)&__m256_result[4]) = 0x3f800000; ++ *((int *)&__m256_result[3]) = 0x3f800000; ++ *((int *)&__m256_result[2]) = 0x3f800000; ++ *((int *)&__m256_result[1]) = 0x3f800000; ++ *((int *)&__m256_result[0]) = 0x3f800000; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7fc00000; ++ *((int *)&__m256_result[6]) = 
0x7fc00000; ++ *((int *)&__m256_result[5]) = 0x7fc00000; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x7fc00000; ++ *((int *)&__m256_result[2]) = 0x7fc00000; ++ *((int *)&__m256_result[1]) = 0x7fc00000; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x803f6004; ++ *((int *)&__m256_op0[4]) = 0x1f636003; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x803f6004; ++ *((int *)&__m256_op0[0]) = 0x1f636003; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x007f0107; ++ *((int *)&__m256_op1[4]) = 0x00c70106; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x007f0107; ++ *((int *)&__m256_op1[0]) = 0x00c70106; ++ *((int *)&__m256_result[7]) = 0x7fc00000; ++ *((int *)&__m256_result[6]) = 0x7fc00000; ++ *((int *)&__m256_result[5]) = 0xbeff7cfd; ++ *((int *)&__m256_result[4]) = 0x5e123f94; ++ *((int *)&__m256_result[3]) = 0x7fc00000; ++ *((int *)&__m256_result[2]) = 0x7fc00000; ++ *((int *)&__m256_result[1]) = 0xbeff7cfd; ++ *((int *)&__m256_result[0]) = 0x5e123f94; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000008; ++ *((int *)&__m256_op0[6]) = 0x60601934; ++ *((int *)&__m256_op0[5]) = 0x00000008; ++ *((int *)&__m256_op0[4]) = 0x00200028; ++ *((int *)&__m256_op0[3]) = 0x00000008; ++ *((int *)&__m256_op0[2]) = 0x60601934; ++ *((int *)&__m256_op0[1]) = 0x00000008; ++ *((int *)&__m256_op0[0]) = 0x00200028; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c +new file mode 100644 +index 000000000..5d5b4c43c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c +@@ -0,0 +1,152 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, 
lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000017f0000017f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256d_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256d_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256d_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvfclass_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c +new file mode 100644 +index 000000000..888e85b6e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c +@@ -0,0 +1,95 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xfffffff8; ++ *((int *)&__m256_op0[6]) = 0xffffff08; ++ *((int *)&__m256_op0[5]) = 0x00ff00f8; ++ *((int *)&__m256_op0[4]) = 0x00ffcff8; ++ *((int *)&__m256_op0[3]) = 0xfffffff8; ++ *((int *)&__m256_op0[2]) = 0xffffff08; ++ *((int *)&__m256_op0[1]) = 0x00ff00f8; ++ *((int *)&__m256_op0[0]) = 0x00ffcff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000020000000200; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x000000ff; ++ *((int *)&__m256_op0[4]) = 0x000000ff; 
++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x000000ff; ++ *((int *)&__m256_op0[0]) = 0x000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffffb; ++ *((int *)&__m256_op0[6]) = 0xfffffffb; ++ *((int *)&__m256_op0[5]) = 0xfffffffb; ++ *((int *)&__m256_op0[4]) = 0xfffffffb; ++ *((int *)&__m256_op0[3]) = 0xfffffffb; ++ *((int *)&__m256_op0[2]) = 0xfffffffb; ++ *((int *)&__m256_op0[1]) = 0xfffffffb; ++ *((int *)&__m256_op0[0]) = 0xfffffffb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvfclass_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c +new file mode 100644 +index 000000000..fa3372358 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c +@@ -0,0 +1,446 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ 
*((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xff56ff55; ++ *((int *)&__m256_op0[4]) = 0xff01ff01; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xff56ff55; ++ *((int *)&__m256_op0[0]) = 0xff01ff01; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x0000abff; ++ *((int *)&__m256_op1[4]) = 0x0000abff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x0000abff; ++ *((int *)&__m256_op1[0]) = 0x0000abff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x0000000a; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000001; ++ *((int *)&__m256_op0[0]) = 0x0000000a; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000040; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x5d20a0a1; ++ *((int *)&__m256_op1[6]) = 0x5d20a0a1; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x5d20a0a1; ++ *((int *)&__m256_op1[2]) = 0x5d20a0a1; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 
0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0003ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffff8000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff8000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffff8000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff8000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xe07de080; ++ *((int *)&__m256_op0[4]) = 0x1f20607a; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xe07de080; ++ *((int *)&__m256_op0[0]) = 0x1f20607a; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xe07de080; ++ *((int *)&__m256_op1[4]) = 0x1f20607a; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xe07de080; ++ *((int *)&__m256_op1[0]) = 0x1f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int 
*)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xe07de080; ++ *((int *)&__m256_op1[4]) = 0x1f20607a; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xe07de080; ++ *((int *)&__m256_op1[0]) = 0x1f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000010; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000010; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 
0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long *)&__m256d_op1[2]) = 0xa5a5a5a5a5a99e03; ++ *((unsigned long *)&__m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long *)&__m256d_op1[0]) = 0xa5a5a5a5a5a99e03; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x24342434ffff2435; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x24342434ffff2435; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c +new file mode 100644 +index 000000000..6d6649f6f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c +@@ -0,0 +1,977 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00010100; ++ *((int *)&__m256_op0[1]) = 0x00010000; ++ *((int 
*)&__m256_op0[0]) = 0x01000100; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xbf7f7fff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xe651bfff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x000000ff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x000000ff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0000ffff; ++ *((int *)&__m256_op1[6]) = 0xc0008001; ++ *((int *)&__m256_op1[5]) = 0x0000ffff; ++ *((int *)&__m256_op1[4]) = 0xc0008001; ++ *((int *)&__m256_op1[3]) = 0x0000ffff; ++ *((int *)&__m256_op1[2]) = 0xc0008001; ++ *((int *)&__m256_op1[1]) = 0x0000ffff; ++ *((int *)&__m256_op1[0]) = 0xc0008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffc6ffc6; ++ *((int *)&__m256_op0[6]) = 0x003a003a; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffc6ffc6; ++ *((int *)&__m256_op0[2]) = 0x003a003a; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x71717171; ++ *((int *)&__m256_op1[6]) = 0x71010101; ++ *((int *)&__m256_op1[5]) = 0x8e8e8e8e; ++ *((int *)&__m256_op1[4]) = 0x8f00ffff; ++ *((int *)&__m256_op1[3]) = 0x71717171; ++ *((int *)&__m256_op1[2]) = 0x71010101; ++ *((int *)&__m256_op1[1]) = 0x8e8e8e8e; ++ *((int *)&__m256_op1[0]) = 0x8f00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x000e000e; ++ *((int *)&__m256_op1[4]) = 0x000e000e; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x000e000e; ++ *((int *)&__m256_op1[0]) = 0x000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000043; ++ *((int *)&__m256_op0[4]) = 0x0207f944; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000043; ++ *((int *)&__m256_op0[0]) = 0x0207f944; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ 
*((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000001; ++ *((int *)&__m256_op1[6]) = 0x9ffdf403; ++ *((int *)&__m256_op1[5]) = 0x00000001; ++ *((int *)&__m256_op1[4]) = 0x1ffd97c3; ++ *((int *)&__m256_op1[3]) = 0x00000001; ++ *((int *)&__m256_op1[2]) = 0x9ffdf403; ++ *((int *)&__m256_op1[1]) = 0x00000001; ++ *((int *)&__m256_op1[0]) = 0x1ffd97c3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x7fff7fff; ++ *((int *)&__m256_op0[4]) = 0x7fff7fff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x7fff7fff; ++ *((int *)&__m256_op0[0]) = 0x7fff7fff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000808; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xbea2e127; ++ *((int *)&__m256_op1[6]) = 0xc046721f; ++ *((int *)&__m256_op1[5]) 
= 0x1729c073; ++ *((int *)&__m256_op1[4]) = 0x816edebe; ++ *((int *)&__m256_op1[3]) = 0xde91f010; ++ *((int *)&__m256_op1[2]) = 0x000006f9; ++ *((int *)&__m256_op1[1]) = 0x5ef1f90e; ++ *((int *)&__m256_op1[0]) = 0xfefaf30d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000200; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000200; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000200; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000200; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000009; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000009; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000009; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffb80000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffb80000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ffff; ++ *((int *)&__m256_op0[6]) = 0x0000ffff; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x0000ffff; ++ *((int *)&__m256_op0[2]) = 0x0000ffff; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfff0fff0; ++ *((int *)&__m256_op0[6]) = 0xff01ff01; ++ *((int *)&__m256_op0[5]) = 0xfff0fff0; ++ *((int *)&__m256_op0[4]) = 0xfff0fff0; ++ *((int *)&__m256_op0[3]) = 0xfff0fff0; ++ *((int *)&__m256_op0[2]) = 0xff01ff01; ++ *((int *)&__m256_op0[1]) = 0xfff0fff0; ++ *((int *)&__m256_op0[0]) = 0xfff0fff0; ++ *((int *)&__m256_op1[7]) = 0xffefffef; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffefffef; ++ *((int *)&__m256_op1[4]) = 0xffefffef; ++ *((int *)&__m256_op1[3]) = 0xffefffef; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffefffef; ++ *((int *)&__m256_op1[0]) = 0xffefffef; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0000ffb1; ++ *((int *)&__m256_op1[6]) = 0x0001ff8f; ++ *((int *)&__m256_op1[5]) = 0x0001004c; ++ *((int *)&__m256_op1[4]) = 0x0001ff87; ++ *((int *)&__m256_op1[3]) = 0x0000ffb1; ++ *((int *)&__m256_op1[2]) = 0x0001ff8f; ++ *((int *)&__m256_op1[1]) = 0x0001004c; ++ *((int *)&__m256_op1[0]) = 0x0001ff87; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00010001; ++ *((int *)&__m256_op1[6]) = 0x00010001; ++ *((int *)&__m256_op1[5]) = 0x00010001; ++ *((int *)&__m256_op1[4]) = 0x00010001; ++ *((int *)&__m256_op1[3]) = 0x00010001; ++ *((int *)&__m256_op1[2]) = 0x00010001; ++ *((int *)&__m256_op1[1]) = 0x00010001; ++ *((int *)&__m256_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffff0000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff0000; ++ *((int *)&__m256_op0[4]) = 0xffff0000; ++ *((int 
*)&__m256_op0[3]) = 0xffff0000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0000; ++ *((int *)&__m256_op0[0]) = 0xffff0000; ++ *((int *)&__m256_op1[7]) = 0x007f8080; ++ *((int *)&__m256_op1[6]) = 0x007f007f; ++ *((int *)&__m256_op1[5]) = 0x007f8080; ++ *((int *)&__m256_op1[4]) = 0x007f007f; ++ *((int *)&__m256_op1[3]) = 0x007f8080; ++ *((int *)&__m256_op1[2]) = 0x007f007f; ++ *((int *)&__m256_op1[1]) = 0x007f8080; ++ *((int *)&__m256_op1[0]) = 0x007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000033; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000033; ++ *((int *)&__m256_op1[7]) = 0x00004200; ++ *((int *)&__m256_op1[6]) = 0x80000000; ++ *((int *)&__m256_op1[5]) = 0x5fff5fff; ++ *((int *)&__m256_op1[4]) = 0x607f0000; ++ *((int *)&__m256_op1[3]) = 0x00004200; ++ *((int *)&__m256_op1[2]) = 0x80000000; ++ *((int *)&__m256_op1[1]) = 0x5fff5fff; ++ *((int *)&__m256_op1[0]) = 0x607f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x7fff8000; ++ *((int *)&__m256_op1[6]) = 0x7fff0000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00008000; ++ *((int *)&__m256_op1[3]) = 0x7fff8000; ++ *((int *)&__m256_op1[2]) = 0x7fff0000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00008000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 
0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00100010; ++ *((int *)&__m256_op1[6]) = 0x00030000; ++ *((int *)&__m256_op1[5]) = 0x00100010; ++ *((int *)&__m256_op1[4]) = 0x00030000; ++ *((int *)&__m256_op1[3]) = 0x00100010; ++ *((int *)&__m256_op1[2]) = 0x00030000; ++ *((int *)&__m256_op1[1]) = 0x00100010; ++ *((int *)&__m256_op1[0]) = 0x00030000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_s 
(__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xf90c0c0c00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfbe0b80c960c96d0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1b9763952fc4c101; ++ *((unsigned long *)&__m256d_op1[2]) = 0xe37affb42fc05f69; ++ *((unsigned long *)&__m256d_op1[1]) = 0x18b988e64facb558; ++ *((unsigned long *)&__m256d_op1[0]) = 0xe5fb66c81da8e5bb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x001e001ea1bfa1bf; ++ *((unsigned long *)&__m256d_op0[2]) = 0x001e001e83e5422e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x001e001ea1bfa1bf; ++ *((unsigned long *)&__m256d_op0[0]) = 0x011f011f0244420e; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffe00f7ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffff629d7; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffe00f7ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffff629d7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_op1[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_op1[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op0[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op0[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffe6ffe6e6800001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x19660019ff806680; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffe6ffe6e6800001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x19660019ff806680; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256d_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m256d_op1[3]) = 0x45d5555545d55555; ++ *((unsigned long *)&__m256d_op1[2]) = 0x74555555e8aaaaaa; ++ *((unsigned long *)&__m256d_op1[1]) = 0x45d5555545d55555; ++ *((unsigned long *)&__m256d_op1[0]) = 0x74555555e8aaaaaa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0003030300000100; ++ *((unsigned long *)&__m256d_op1[3]) = 
0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op1[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op1[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c +new file mode 100644 +index 000000000..a64dd7598 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c +@@ -0,0 +1,759 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0018796d; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int 
*)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00fffb04; ++ *((int *)&__m256_op0[6]) = 0x02fddf20; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00fffb04; ++ *((int *)&__m256_op0[2]) = 0x02fddf20; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x41dfffc0; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x41dfffdf; ++ *((int *)&__m256_op1[2]) = 0xffc00000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffee; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffee; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffee; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 
0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01fe007a; ++ *((int *)&__m256_op0[6]) = 0x01c40110; ++ *((int *)&__m256_op0[5]) = 0x019d00a2; ++ *((int *)&__m256_op0[4]) = 0x0039fff9; ++ *((int *)&__m256_op0[3]) = 0x01fe007a; ++ *((int *)&__m256_op0[2]) = 0x01c40110; ++ *((int *)&__m256_op0[1]) = 0x019d00a2; ++ *((int *)&__m256_op0[0]) = 0x003a0000; ++ *((int *)&__m256_op1[7]) = 0x0000fffe; ++ *((int *)&__m256_op1[6]) = 0x00800022; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x0000fffe; ++ *((int *)&__m256_op1[2]) = 0x00800022; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int 
*)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x7fff7ffe; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x7fff7ffe; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000002; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000002; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000002; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x04000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x04000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x04000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x04000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000040; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000040; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 
0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00010001; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00010001; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00010001; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_op0[1]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_op0[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000007773; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000003373; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1616161616161616; ++ *((unsigned long *)&__m256d_op1[2]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7ffe16167f161616; ++ *((unsigned long *)&__m256d_op1[0]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010183f9999b; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[1]) = 0x01010101d58f43c9; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000002070145; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000002070145; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1b976395; ++ *((int *)&__m256_op0[6]) = 0x2fc4c101; ++ *((int *)&__m256_op0[5]) = 0xe37affb4; ++ *((int *)&__m256_op0[4]) = 0x2fc05f69; ++ *((int *)&__m256_op0[3]) = 0x18b988e6; ++ *((int *)&__m256_op0[2]) = 0x4facb558; ++ *((int *)&__m256_op0[1]) = 0xe5fb66c8; ++ *((int *)&__m256_op0[0]) = 0x1da8e5bb; ++ *((int *)&__m256_op1[7]) = 0x01a72334; ++ *((int *)&__m256_op1[6]) = 0xffff00ff; ++ *((int *)&__m256_op1[5]) = 0xff4f6838; ++ *((int *)&__m256_op1[4]) = 0xff937648; ++ *((int *)&__m256_op1[3]) = 0x00a2afb7; ++ *((int *)&__m256_op1[2]) = 0xfff00ecb; ++ *((int *)&__m256_op1[1]) = 0xffce110f; ++ *((int *)&__m256_op1[0]) = 0x004658c7; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00001000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00001000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ff00; ++ *((int *)&__m256_op0[6]) = 0x0000ffff; ++ *((int *)&__m256_op0[5]) = 0x000000ff; ++ *((int *)&__m256_op0[4]) = 0x000000ff; ++ *((int *)&__m256_op0[3]) = 0x0000ff00; ++ *((int *)&__m256_op0[2]) = 0x0000ffff; ++ *((int *)&__m256_op0[1]) = 0x000000ff; ++ *((int *)&__m256_op0[0]) = 0x000000ff; ++ *((int *)&__m256_op1[7]) = 0x0000ffee; ++ *((int *)&__m256_op1[6]) = 0x0000ff4c; ++ *((int *)&__m256_op1[5]) = 0x0000ff05; ++ *((int *)&__m256_op1[4]) = 0x0000ff3c; ++ *((int *)&__m256_op1[3]) = 0x0000fff9; ++ *((int *)&__m256_op1[2]) = 0x0000ff78; ++ *((int *)&__m256_op1[1]) = 0x0000ffa8; ++ *((int *)&__m256_op1[0]) = 0x0000ff31; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffff0000; ++ *((int *)&__m256_op1[4]) = 0xffff0000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffff0000; ++ *((int *)&__m256_op1[0]) = 0xffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ff01; ++ *((int *)&__m256_op0[6]) = 0x00ff0000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ff01; ++ *((int *)&__m256_op0[3]) = 0x0000ff01; ++ 
*((int *)&__m256_op0[2]) = 0x00ff0000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ff01; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00010000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00010000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x02000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x02000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x01010000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x01010000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffff0101; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int 
*)&__m256_op1[1]) = 0xffff0101; ++ *((int *)&__m256_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffffb; ++ *((int *)&__m256_op0[6]) = 0xfffffffb; ++ *((int *)&__m256_op0[5]) = 0xfffffffb; ++ *((int *)&__m256_op0[4]) = 0xfffffffb; ++ *((int *)&__m256_op0[3]) = 0xfffffffb; ++ *((int *)&__m256_op0[2]) = 0xfffffffb; ++ *((int *)&__m256_op0[1]) = 0xfffffffb; ++ *((int *)&__m256_op0[0]) = 0xfffffffb; ++ *((int *)&__m256_op1[7]) = 0x0000ffff; ++ *((int *)&__m256_op1[6]) = 0x0001000e; ++ *((int *)&__m256_op1[5]) = 0x0000ffff; ++ *((int *)&__m256_op1[4]) = 0x0000ffff; ++ *((int *)&__m256_op1[3]) = 0x0000ffff; ++ *((int *)&__m256_op1[2]) = 0x0000ffff; ++ *((int *)&__m256_op1[1]) = 0x0000ffff; ++ *((int *)&__m256_op1[0]) = 0x0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x8080808280808082; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8080808280808082; ++ *((unsigned long *)&__m256d_op0[1]) = 0x8080808280808080; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8080808280808082; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0c6a240000000000; ++ 
*((unsigned long *)&__m256d_op1[2]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c +new file mode 100644 +index 000000000..733cc00ee +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c +@@ -0,0 +1,675 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xfe02fe02; ++ *((int *)&__m256_op0[2]) = 0xfee5fe22; ++ *((int *)&__m256_op0[1]) = 0xff49fe42; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0000ffff; ++ *((int *)&__m256_op1[6]) = 0x0000ff80; ++ *((int *)&__m256_op1[5]) = 0x00004686; ++ *((int *)&__m256_op1[4]) = 0x00007f79; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x0000ffff; ++ *((int *)&__m256_op1[1]) = 0x0000f328; ++ *((int *)&__m256_op1[0]) = 0x0000dfff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x01000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x01000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; 
++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffff80cb; ++ *((int *)&__m256_op1[6]) = 0xfffffdf8; ++ *((int *)&__m256_op1[5]) = 0x00000815; ++ *((int *)&__m256_op1[4]) = 0x00000104; ++ *((int *)&__m256_op1[3]) = 0xffffffa4; ++ *((int *)&__m256_op1[2]) = 0xfffffffd; ++ *((int *)&__m256_op1[1]) = 0x00000007; ++ *((int *)&__m256_op1[0]) = 0x00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00003f3f; ++ *((int *)&__m256_op1[4]) = 0xc6c68787; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00003f3f; ++ *((int *)&__m256_op1[0]) = 0x87870000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000002; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 
0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0x0101ffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x0101ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01000100; ++ *((int *)&__m256_op0[6]) = 0x01000100; ++ *((int *)&__m256_op0[5]) = 0x01000100; ++ *((int *)&__m256_op0[4]) = 0x01000100; ++ *((int *)&__m256_op0[3]) = 0x01000100; ++ *((int *)&__m256_op0[2]) = 0x01000100; ++ *((int *)&__m256_op0[1]) = 0x01000100; ++ *((int *)&__m256_op0[0]) = 0x01000100; ++ *((int *)&__m256_op1[7]) = 0x7f800000; ++ *((int *)&__m256_op1[6]) = 0x7f800000; ++ *((int *)&__m256_op1[5]) = 0x62d2acee; ++ *((int *)&__m256_op1[4]) = 0x7fc00000; ++ *((int *)&__m256_op1[3]) = 0x7f800000; ++ *((int *)&__m256_op1[2]) = 0x7f800000; ++ *((int *)&__m256_op1[1]) = 0x62d2acee; ++ *((int *)&__m256_op1[0]) = 0x7fc00000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ff01; ++ *((int *)&__m256_op0[6]) = 0x00ff0000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ff01; ++ *((int *)&__m256_op0[3]) = 0x0000ff01; ++ *((int *)&__m256_op0[2]) = 0x00ff0000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ff01; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000808; ++ *((int *)&__m256_op1[4]) = 0x00000808; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000808; ++ *((int *)&__m256_op1[0]) = 0x00000808; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffff8000; ++ *((int *)&__m256_op0[5]) = 0x7efefefe; ++ *((int *)&__m256_op0[4]) = 0x80ffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x7efefefe; ++ *((int *)&__m256_op0[0]) = 0x80ffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x0001ffaa; ++ *((int *)&__m256_op1[6]) = 0x0000040e; ++ *((int *)&__m256_op1[5]) = 0x00007168; ++ *((int *)&__m256_op1[4]) = 0x00007bb6; ++ *((int *)&__m256_op1[3]) = 0x0001ffe8; ++ *((int *)&__m256_op1[2]) = 0x0001fe9c; ++ *((int *)&__m256_op1[1]) = 0x00002282; ++ *((int *)&__m256_op1[0]) = 0x00001680; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x55555501; ++ *((int *)&__m256_op0[4]) = 0xfefefeab; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x55555501; ++ *((int *)&__m256_op0[0]) = 0xfefefeab; ++ *((int *)&__m256_op1[7]) = 0x00000105; ++ *((int *)&__m256_op1[6]) = 0xfffffefb; ++ *((int *)&__m256_op1[5]) = 0xffffff02; ++ *((int *)&__m256_op1[4]) = 0x000000fe; ++ *((int *)&__m256_op1[3]) = 0x00000105; ++ *((int *)&__m256_op1[2]) = 0xfffffefb; ++ *((int *)&__m256_op1[1]) = 0xffffff02; ++ *((int *)&__m256_op1[0]) = 0x000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 
0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000080; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000080; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x0000ffce; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000fc7c; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x0000ffce; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000fc7c; ++ *((int *)&__m256_op1[7]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[6]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[5]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[4]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[3]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[2]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[1]) = 0xe7e7e7e7; ++ *((int *)&__m256_op1[0]) = 0xe7e7e7e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int 
*)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0x0007a861; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0x0007a861; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00100010; ++ *((int *)&__m256_op1[5]) = 0x00100010; ++ *((int *)&__m256_op1[4]) = 0x00100010; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00100010; ++ *((int *)&__m256_op1[1]) = 0x00100010; ++ *((int *)&__m256_op1[0]) = 0x00100010; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x01010101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x000001e0; ++ *((int *)&__m256_op1[6]) = 0x01e001e0; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x000001e0; ++ *((int *)&__m256_op1[2]) = 0x01e001e0; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000000000007f; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256d_op1[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0101000001010000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfff1000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfff1000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
+new file mode 100644
+index 000000000..190741070
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
+@@ -0,0 +1,872 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x7fff7fff;
++  *((int *)&__m256_op0[4]) = 0x7fff7fff;
++  *((int *)&__m256_op0[3]) = 0x7fff01fd;
++  *((int *)&__m256_op0[2]) = 0x7fff7fff;
++  *((int *)&__m256_op0[1]) = 0x00007fff;
++  *((int *)&__m256_op0[0]) = 0x7fff7fff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++
*((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xdededede; ++ *((int *)&__m256_op0[6]) = 0xdededede; ++ *((int *)&__m256_op0[5]) = 0xdededede; ++ *((int *)&__m256_op0[4]) = 0xdededede; ++ *((int *)&__m256_op0[3]) = 0xdededede; ++ *((int *)&__m256_op0[2]) = 0xdededede; ++ *((int *)&__m256_op0[1]) = 0xdededede; ++ *((int *)&__m256_op0[0]) = 0xdededede; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000051; ++ *((int *)&__m256_op1[5]) = 0x00001010; ++ *((int *)&__m256_op1[4]) = 0x00000fff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000051; ++ *((int *)&__m256_op1[1]) = 0x00001010; ++ *((int *)&__m256_op1[0]) = 0x00000fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000051; ++ *((int *)&__m256_op0[5]) = 0x00001010; ++ *((int *)&__m256_op0[4]) = 0x00000fff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000051; ++ *((int *)&__m256_op0[1]) = 0x00001010; ++ *((int *)&__m256_op0[0]) = 0x00000fff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ffff; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x0000ffff; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x000007c8; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x000007c8; ++ *((int 
*)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x80000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000001f; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000001f; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0000001f; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x0000001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xfff8ff40; ++ *((int *)&__m256_op0[5]) = 0x0000ff01; ++ *((int *)&__m256_op0[4]) = 0x00090040; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xfff8ff40; ++ *((int *)&__m256_op0[1]) = 0x0000ff01; ++ *((int *)&__m256_op0[0]) = 0x00090040; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xff1cff1c; ++ *((int *)&__m256_op1[6]) = 0xff1cff1c; ++ *((int *)&__m256_op1[5]) = 0xff1cff1c; ++ *((int *)&__m256_op1[4]) = 0xff1cff1c; ++ *((int *)&__m256_op1[3]) = 0xff1cff1c; ++ *((int *)&__m256_op1[2]) = 0xff1cff1c; ++ *((int *)&__m256_op1[1]) = 0xff1cff1c; ++ *((int *)&__m256_op1[0]) = 0xff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00fe01f0; ++ *((int *)&__m256_op0[6]) = 0x00010000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00c40086; ++ *((int *)&__m256_op0[3]) = 0x00fe01f0; ++ *((int *)&__m256_op0[2]) = 0x00010000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00c40086; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x0000ffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x0000ffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int 
*)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x0fff0180; ++ *((int *)&__m256_op0[4]) = 0x0fff0181; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x0fff0180; ++ *((int *)&__m256_op0[0]) = 0x0fff0181; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0003ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xfffffe20; ++ *((int *)&__m256_op0[5]) = 0x0000001d; ++ *((int *)&__m256_op0[4]) = 0xfffffe1f; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 
0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x5fa00000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x5fa00000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000004; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00007f95; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000004; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00007f95; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x7f010000; ++ *((int *)&__m256_op0[5]) = 0x00010000; ++ *((int *)&__m256_op0[4]) = 0x00007f7f; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x7f010000; ++ *((int *)&__m256_op0[1]) = 0x00010000; ++ *((int *)&__m256_op0[0]) = 0x00007f7f; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x003f0200; ++ *((int *)&__m256_op0[6]) = 0x01400200; ++ *((int *)&__m256_op0[5]) = 0x003f00ff; ++ *((int *)&__m256_op0[4]) = 0x003f00c4; ++ *((int *)&__m256_op0[3]) = 0x003f0200; ++ *((int *)&__m256_op0[2]) = 0x01400200; ++ *((int *)&__m256_op0[1]) = 0x003f00ff; ++ *((int *)&__m256_op0[0]) = 0x003f00c4; ++ *((int *)&__m256_op1[7]) = 0x00000101; ++ *((int *)&__m256_op1[6]) = 0x01010101; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000101; ++ *((int *)&__m256_op1[2]) = 0x01010101; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x01fe000000ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x01fe000001fe0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010102; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0101010201010204; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010102; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010102; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000e00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfff9fff9fff9fff9; 
++ *((unsigned long *)&__m256d_op1[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010127272525; ++ *((unsigned long *)&__m256d_op1[2]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010127272525; ++ *((unsigned long *)&__m256d_op1[0]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long 
*)&__m256d_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256d_op1[3]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xdff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000040002; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000000000007f; ++ *((unsigned long *)&__m256d_op1[0]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xc600000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc600000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff56ff55ff01ff01; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff56ff55ff01ff01; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c +new file mode 100644 +index 000000000..8dd58f228 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c +@@ -0,0 +1,340 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, 
__m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x01fe007a; ++ *((int *)&__m256_op1[6]) = 0x01c40110; ++ *((int *)&__m256_op1[5]) = 0x019d00a2; ++ *((int *)&__m256_op1[4]) = 0x0039fff9; ++ *((int *)&__m256_op1[3]) = 0x01fe007a; ++ *((int *)&__m256_op1[2]) = 0x01c40110; ++ *((int *)&__m256_op1[1]) = 0x019d00a2; ++ *((int *)&__m256_op1[0]) = 0x003a0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfff10000; ++ *((int *)&__m256_op0[4]) = 0xfff10000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfff10000; ++ *((int *)&__m256_op0[0]) = 0xfff10000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfff10000; ++ *((int *)&__m256_op1[4]) = 0xfff10000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256d_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256d_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x7); ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffff0100000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffff0100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2); ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000050007; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000039; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c +new file mode 100644 +index 000000000..3230c101d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c +@@ -0,0 +1,361 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ 
__m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x01ffffff; ++ *((int *)&__m256_op1[4]) = 0xfe000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x01ffffff; ++ *((int *)&__m256_op1[0]) = 0xfe000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000504f; ++ *((int *)&__m256_op0[6]) = 0xffff3271; ++ *((int *)&__m256_op0[5]) = 0xffff47b4; ++ *((int *)&__m256_op0[4]) = 0xffff5879; ++ *((int *)&__m256_op0[3]) = 0x0000504f; ++ *((int *)&__m256_op0[2]) = 0xffff3271; ++ *((int *)&__m256_op0[1]) = 0xffff47b4; ++ *((int *)&__m256_op0[0]) = 0xffff5879; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xde00fe00; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0000fe01; ++ *((int *)&__m256_op0[4]) = 0x0000fe01; ++ *((int *)&__m256_op0[3]) = 0xde00fe00; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0000fe01; ++ *((int *)&__m256_op0[0]) = 0x0000fe01; ++ *((int *)&__m256_op1[7]) = 0x0000ffff; ++ *((int *)&__m256_op1[6]) = 0x0000ffff; ++ *((int *)&__m256_op1[5]) = 0x00ff00fe; ++ *((int *)&__m256_op1[4]) = 0x00ff00fe; ++ *((int *)&__m256_op1[3]) = 0x0000ffff; ++ *((int *)&__m256_op1[2]) = 0x0000ffff; ++ *((int *)&__m256_op1[1]) = 0x00ff00fe; ++ *((int *)&__m256_op1[0]) = 0x00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[6]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[5]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[4]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[3]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[2]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[1]) = 0xf3f3f3f3; ++ *((int *)&__m256_op0[0]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[7]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[6]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[5]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[4]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[3]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[2]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[1]) = 0xf3f3f3f3; ++ *((int *)&__m256_op1[0]) = 0xf3f3f3f3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x0007a861; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int 
*)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x0007a861; ++ *((int *)&__m256_op1[7]) = 0x80008000; ++ *((int *)&__m256_op1[6]) = 0x80008000; ++ *((int *)&__m256_op1[5]) = 0x80008000; ++ *((int *)&__m256_op1[4]) = 0xfff98000; ++ *((int *)&__m256_op1[3]) = 0x80008000; ++ *((int *)&__m256_op1[2]) = 0x80008000; ++ *((int *)&__m256_op1[1]) = 0x80008000; ++ *((int *)&__m256_op1[0]) = 0xfff98000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000015d050192cb; ++ *((unsigned long *)&__m256d_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000033ff01020e23; ++ *((unsigned long *)&__m256d_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256d_op1[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256d_op1[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00fe01f000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; 
++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c +new file mode 100644 +index 000000000..23cbc4bf0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c +@@ -0,0 +1,424 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x0eb7aaaa; ++ *((int *)&__m256_op1[6]) = 0xa6e6ac80; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x0eb7aaaa; ++ *((int *)&__m256_op1[2]) = 0xa6e6ac80; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x3fff3fff; ++ *((int *)&__m256_op0[6]) = 0x3fff3fff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x3fff3fff; ++ *((int *)&__m256_op0[3]) = 0x3fff3fff; ++ *((int *)&__m256_op0[2]) = 0x3fff3fff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x3fff3fff; ++ *((int *)&__m256_op1[7]) = 0x017e01fe; ++ *((int *)&__m256_op1[6]) = 0x01fe01fe; ++ *((int *)&__m256_op1[5]) = 0x05860606; ++ *((int *)&__m256_op1[4]) = 0x01fe0202; ++ *((int *)&__m256_op1[3]) = 0x017e01fe; ++ *((int *)&__m256_op1[2]) = 0x01fe0000; ++ *((int *)&__m256_op1[1]) = 0x05860606; ++ *((int *)&__m256_op1[0]) = 0x01fe0004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000003f; ++ *((int *)&__m256_op0[6]) = 0x00390035; ++ *((int *)&__m256_op0[5]) = 0x8015003f; ++ *((int *)&__m256_op0[4]) = 0x0006001f; ++ *((int *)&__m256_op0[3]) = 0x0000003f; ++ *((int *)&__m256_op0[2]) = 0x00390035; ++ *((int *)&__m256_op0[1]) = 0x8015003f; ++ *((int *)&__m256_op0[0]) = 0x0006001f; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xefdfefdf; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xefdfefdf; ++ *((int *)&__m256_op1[4]) = 0xefdfefdf; ++ *((int *)&__m256_op1[3]) = 0xefdfefdf; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xefdfefdf; ++ *((int *)&__m256_op1[0]) = 0xefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ 
*((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00ff00ff; ++ *((int *)&__m256_op1[6]) = 0x00ff00ff; ++ *((int *)&__m256_op1[5]) = 0x00ff00ff; ++ *((int *)&__m256_op1[4]) = 0x00ff00ff; ++ *((int *)&__m256_op1[3]) = 0x00ff00ff; ++ *((int *)&__m256_op1[2]) = 0x00ff00ff; ++ *((int *)&__m256_op1[1]) = 0x00ff00ff; ++ *((int *)&__m256_op1[0]) = 0x00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x7bfffff0; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x80007fe8; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x7bfffff0; ++ *((int *)&__m256_op0[1]) = 0x00000001; ++ *((int *)&__m256_op0[0]) = 0x80007fe8; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x02020102; ++ *((int *)&__m256_op0[6]) = 0x02020102; ++ *((int *)&__m256_op0[5]) = 0x02020102; ++ *((int *)&__m256_op0[4]) = 0x02020102; ++ *((int *)&__m256_op0[3]) = 0x02020102; ++ *((int *)&__m256_op0[2]) = 0x02020102; ++ *((int *)&__m256_op0[1]) = 0x02020102; ++ *((int *)&__m256_op0[0]) = 0x02020102; ++ *((int *)&__m256_op1[7]) = 0x3e800000; ++ *((int *)&__m256_op1[6]) = 0x3e800000; ++ *((int *)&__m256_op1[5]) = 0x3e800000; ++ *((int *)&__m256_op1[4]) = 0x3e800000; ++ *((int *)&__m256_op1[3]) = 0x3e800000; ++ *((int *)&__m256_op1[2]) = 0x3e800000; ++ *((int *)&__m256_op1[1]) = 0x3e800000; ++ *((int *)&__m256_op1[0]) = 0x3e800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x00ff00ff; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00ff00ff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xff88ff88; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xff88ff88; ++ *((int *)&__m256_op1[7]) = 0xfe01fe01; ++ *((int *)&__m256_op1[6]) = 0x0000fd02; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x3fc03fc0; ++ *((int *)&__m256_op1[3]) = 0xfe01fe01; ++ *((int *)&__m256_op1[2]) = 0x0000fd02; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x3fc03fc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0xffffb2f6; ++ *((int *)&__m256_op0[4]) = 0x00006f48; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0xffffb2f6; ++ *((int *)&__m256_op0[0]) = 0x00006f48; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x000000ff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ 
*((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00100010; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00100010; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00100010; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0020000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0020000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x3fffbfff80000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00004000007f8000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x3fffbfff80000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00004000007f8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff 
--git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c +new file mode 100644 +index 000000000..6641d2c58 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c +@@ -0,0 +1,924 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x59800000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x59800000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x41d66000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x41d66000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000;
++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xa41aa42e; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xa41aa42e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x83f95466; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x00005400; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xfefefeff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xff295329; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xfefefeff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xff295329; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x111ebb78; ++ *((int *)&__m256_op1[6]) = 0x4f9c4100; ++ *((int *)&__m256_op1[5]) = 0x1c386546; ++ *((int *)&__m256_op1[4]) = 0x809f3b50; ++ *((int *)&__m256_op1[3]) = 0x111ebb78; ++ *((int *)&__m256_op1[2]) = 0x4f9bf1ac; ++ *((int *)&__m256_op1[1]) = 0x21f6050d; ++ *((int *)&__m256_op1[0]) = 0x955d3f68; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffff0000; ++ *((int *)&__m256_op1[4]) = 0xffff0001; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffff0000; ++ *((int *)&__m256_op1[0]) = 0xffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000100; ++ *((int *)&__m256_op0[5]) = 0x00000002; ++ *((int *)&__m256_op0[4]) = 0xff910072; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000100; ++ *((int *)&__m256_op0[1]) = 0x00000002; ++ *((int *)&__m256_op0[0]) = 0xff910072; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffff97a2; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffff97a2; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 
0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x55555555; ++ *((int *)&__m256_op0[6]) = 0x3f800000; ++ *((int *)&__m256_op0[5]) = 0x55555555; ++ *((int *)&__m256_op0[4]) = 0x80000000; ++ *((int *)&__m256_op0[3]) = 0x55555555; ++ *((int *)&__m256_op0[2]) = 0x3f800000; ++ *((int *)&__m256_op0[1]) = 0x55555555; ++ *((int *)&__m256_op0[0]) = 0x80000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0001fffe; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x0001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00018002; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000002; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00018002; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000002; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00030000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00030000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xfff70156; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xfff70156; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xfff70156; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xfff70156; ++ *((int *)&__m256_op1[7]) = 0x7fefffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x7fefffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x7fefffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x7fefffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ff70; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ff70; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000100; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000100; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 
0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000002; ++ *((int *)&__m256_op1[4]) = 0x00000008; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000002; ++ *((int *)&__m256_op1[0]) = 0x00000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x4393a0a5; ++ *((int *)&__m256_op0[6]) = 0xbc606060; ++ *((int *)&__m256_op0[5]) = 0x43b32fee; ++ *((int *)&__m256_op0[4]) = 0xa9000000; ++ *((int *)&__m256_op0[3]) = 0x4393a0a5; ++ *((int *)&__m256_op0[2]) = 0xbc606060; ++ *((int *)&__m256_op0[1]) = 0x43b32fee; ++ *((int *)&__m256_op0[0]) = 0xa9000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000003; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000003; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000003; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000003; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffeb664; ++ *((int *)&__m256_op0[6]) = 0x007ffd61; ++ *((int *)&__m256_op0[5]) = 0xfffe97a1; ++ *((int *)&__m256_op0[4]) = 0xdf5b41b0; ++ *((int *)&__m256_op0[3]) = 0xfffeb664; ++ *((int *)&__m256_op0[2]) = 0x007ffd61; ++ *((int *)&__m256_op0[1]) = 0xfffe97a1; ++ *((int *)&__m256_op0[0]) = 0xdf5b41b0; ++ *((int *)&__m256_op1[7]) = 0xfffeb683; ++ *((int *)&__m256_op1[6]) = 0x9ffffd80; ++ *((int *)&__m256_op1[5]) = 0xfffe97c0; ++ *((int *)&__m256_op1[4]) = 0x20010001; ++ *((int *)&__m256_op1[3]) = 0xfffeb683; ++ *((int *)&__m256_op1[2]) = 0x9ffffd80; ++ *((int *)&__m256_op1[1]) = 0xfffe97c0; ++ *((int *)&__m256_op1[0]) = 0x20010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x053531f7c6334908; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8e41dcbff87e7900; ++ *((unsigned long *)&__m256d_op0[1]) = 0x12eb8332e3e15093; ++ *((unsigned long *)&__m256d_op0[0]) = 0x9a7491f9e016ccd4; ++ *((unsigned long *)&__m256d_op1[3]) = 0x345947dcd192b5c4; ++ *((unsigned long *)&__m256d_op1[2]) = 0x182100c72280e687; ++ *((unsigned long *)&__m256d_op1[1]) = 0x4a1c80bb8e892e00; ++ *((unsigned long *)&__m256d_op1[0]) = 0x063ecfbd58abc4b7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffff0002fffeffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffff0002fffeffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000010486048c; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000006; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000010486048c; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00ff00ff00ef0120; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00ff00ff00ef0120; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff00ffff00000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff00ffff00000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d 
(__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x04e8296f08181818; ++ *((unsigned long *)&__m256d_op1[2]) = 0x032feea900000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x04e8296f08181818; ++ *((unsigned long *)&__m256d_op1[0]) = 0x032feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1400080008000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1400080008000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1400080008000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1400080008000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xc1be9e9e9f000000; 
++ *((unsigned long *)&__m256d_op0[2]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256d_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256d_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256d_op1[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffe045fffffeff; ++ *((unsigned 
long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffff7d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c +new file mode 100644 +index 000000000..d25fc25da +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c +@@ -0,0 +1,627 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffff90; ++ *((int *)&__m256_op0[4]) = 0xffffff80; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffff90; ++ *((int *)&__m256_op0[0]) = 0xffffff80; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfefee0e3; ++ *((int *)&__m256_op0[6]) = 0xfefefe00; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xfefee0e3; ++ *((int *)&__m256_op0[2]) = 0xfefefe00; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000ffff; ++ *((int *)&__m256_op0[6]) = 0x0000ffff; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x0000ffff; ++ *((int *)&__m256_op0[2]) = 0x0000ffff; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int 
*)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x8000000a; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x8000000a; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x01010101; ++ *((int *)&__m256_op0[4]) = 0x01010101; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x01010101; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 
0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffd8ffc7; ++ *((int *)&__m256_op0[4]) = 0xffdaff8a; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffd8ffc7; ++ *((int *)&__m256_op0[0]) = 0xffdaff8a; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0xffffb3b4; ++ *((int *)&__m256_op1[5]) = 0xfffffff5; ++ *((int *)&__m256_op1[4]) = 0xffff4738; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xffffb3b4; ++ *((int *)&__m256_op1[1]) = 0xfffffff5; ++ *((int *)&__m256_op1[0]) = 0xffff4738; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xf7f7f7f7; ++ *((int *)&__m256_op1[6]) = 0xf7f7f7f8; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xf7f7f7f7; ++ *((int *)&__m256_op1[2]) = 0xf7f7f7f8; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x5fa00000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x5fa00000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op1[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op1[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op1[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256d_op1[0]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000105fffffefb; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffff02000000fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000105fffffefb; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffff02000000fe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000020afefb1; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7f350104f7ebffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000003fffc1; ++ *((unsigned long *)&__m256d_op0[0]) = 0x005c0003fff9ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000010001; 
++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256d_op0[1]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c +new file mode 100644 +index 000000000..8210f749b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c +@@ -0,0 +1,1212 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000101; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 
0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xc08f7800; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xfffffefd; ++ *((int *)&__m256_op0[3]) = 0xc08f7800; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000101; ++ *((int *)&__m256_op1[4]) = 0x00000102; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000101; ++ *((int *)&__m256_op1[0]) = 0x00000102; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x001f1f3e; ++ *((int *)&__m256_op1[6]) = 0x3e1f1f00; ++ *((int *)&__m256_op1[5]) = 0x00030609; ++ *((int *)&__m256_op1[4]) = 0x09060300; ++ *((int *)&__m256_op1[3]) = 0x001f1f3e; ++ *((int *)&__m256_op1[2]) = 0x3e1f1f00; ++ *((int *)&__m256_op1[1]) = 0x00030609; ++ *((int *)&__m256_op1[0]) = 0x09060300; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fffffff; ++ *((int *)&__m256_op0[6]) = 0x7fffffff; ++ *((int *)&__m256_op0[5]) = 0x7fffffff; ++ *((int *)&__m256_op0[4]) = 0x7fffffff; ++ *((int *)&__m256_op0[3]) = 0x7fffffff; ++ *((int *)&__m256_op0[2]) = 0x7fffffff; ++ *((int *)&__m256_op0[1]) = 0x7fffffff; ++ *((int *)&__m256_op0[0]) = 0x7fffffff; ++ *((int *)&__m256_op1[7]) = 0x20fc0000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x20fc0000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 
0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffff0400; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffff0400; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x08050501; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x08050501; ++ *((int *)&__m256_op1[7]) = 0x90909090; ++ *((int *)&__m256_op1[6]) = 0x90909090; ++ *((int *)&__m256_op1[5]) = 0x90909090; ++ *((int *)&__m256_op1[4]) = 0x90909090; ++ *((int *)&__m256_op1[3]) = 0x90909090; ++ *((int *)&__m256_op1[2]) = 0x90909090; ++ *((int *)&__m256_op1[1]) = 0x90909090; ++ *((int *)&__m256_op1[0]) = 0x90909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00001ff8; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xd8d8c000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00001ff8; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xd8d8c000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x02020102; ++ *((int *)&__m256_op1[6]) = 0x02020102; ++ *((int *)&__m256_op1[5]) = 0x02020102; ++ *((int *)&__m256_op1[4]) = 0x02020102; ++ *((int *)&__m256_op1[3]) = 0x02020102; ++ *((int *)&__m256_op1[2]) = 0x02020102; ++ *((int 
*)&__m256_op1[1]) = 0x02020102; ++ *((int *)&__m256_op1[0]) = 0x02020102; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x00ff00ff; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00ff00ff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000001; ++ *((int *)&__m256_op1[6]) = 0xffe00000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000001; ++ *((int *)&__m256_op1[2]) = 0xffe00000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x60000108; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x01060005; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x7fef0001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfffffff8; ++ *((int *)&__m256_op1[4]) = 0xfffffff8; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xfffffff8; ++ *((int *)&__m256_op1[0]) = 0xfc000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x327f0101; ++ *((int *)&__m256_op0[6]) = 0x01010102; ++ *((int *)&__m256_op0[5]) = 0x63000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x327f0101; ++ *((int *)&__m256_op0[2]) = 0x01010102; ++ *((int *)&__m256_op0[1]) = 0x63000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xce7fffff; ++ *((int *)&__m256_op1[6]) = 0xfffffffe; ++ *((int *)&__m256_op1[5]) = 0x63000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xce7fffff; ++ *((int *)&__m256_op1[2]) = 0xfffffffe; ++ *((int *)&__m256_op1[1]) = 0x63000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x59800000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x59800000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0eb7aaaa; ++ *((int *)&__m256_op1[6]) = 0xa6e6ac80; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x0eb7aaaa; ++ *((int *)&__m256_op1[2]) = 0xa6e6ac80; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000007; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int 
*)&__m256_op1[7]) = 0xdbc80000; ++ *((int *)&__m256_op1[6]) = 0x00003fff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xdbc80000; ++ *((int *)&__m256_op1[2]) = 0x00003fff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000002; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000007f; ++ *((int *)&__m256_op1[7]) = 0xfffffff3; ++ *((int *)&__m256_op1[6]) = 0x0000000b; ++ *((int *)&__m256_op1[5]) = 0xfffffff3; ++ *((int *)&__m256_op1[4]) = 0xfffffff3; ++ *((int *)&__m256_op1[3]) = 0xfffffff3; ++ *((int *)&__m256_op1[2]) = 0x0000000b; ++ *((int *)&__m256_op1[1]) = 0xfffffff3; ++ *((int *)&__m256_op1[0]) = 0xfffffff3; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x223d76f0; ++ *((int *)&__m256_op0[6]) = 0x9f3881ff; ++ *((int *)&__m256_op0[5]) = 0x3870ca8d; ++ *((int *)&__m256_op0[4]) = 0x013e76a0; ++ *((int *)&__m256_op0[3]) = 0x223d76f0; ++ *((int *)&__m256_op0[2]) = 0x9f37e357; ++ *((int *)&__m256_op0[1]) = 0x43ec0a1b; ++ *((int *)&__m256_op0[0]) = 0x2aba7ed0; ++ *((int *)&__m256_op1[7]) = 0x111ebb78; ++ *((int *)&__m256_op1[6]) = 0x4f9c4100; ++ *((int *)&__m256_op1[5]) = 0x1c386546; ++ *((int *)&__m256_op1[4]) = 0x809f3b50; ++ *((int *)&__m256_op1[3]) = 0x111ebb78; ++ *((int *)&__m256_op1[2]) = 0x4f9bf1ac; ++ *((int *)&__m256_op1[1]) = 0x21f6050d; ++ *((int *)&__m256_op1[0]) = 0x955d3f68; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x27272525; ++ *((int *)&__m256_op0[5]) = 0x23a2a121; ++ *((int *)&__m256_op0[4]) = 0x179e951d; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x27272525; ++ *((int *)&__m256_op0[1]) = 0x23a2a121; ++ *((int *)&__m256_op0[0]) = 0x179e951d; ++ *((int *)&__m256_op1[7]) = 0x00001251; ++ *((int *)&__m256_op1[6]) = 0x00005111; ++ *((int *)&__m256_op1[5]) = 0x00000c4f; ++ *((int *)&__m256_op1[4]) = 0x00004b0f; ++ *((int *)&__m256_op1[3]) = 0x00001251; ++ *((int *)&__m256_op1[2]) = 0x00005111; ++ *((int *)&__m256_op1[1]) = 0x00000c4f; ++ *((int *)&__m256_op1[0]) = 0x00004b0f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0xff800000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x80000000; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0xff800000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x7ff00000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x7ff00000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x7ff00000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x7ff00000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ 
*((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x000000ff; ++ *((int *)&__m256_op1[6]) = 0x000000ff; ++ *((int *)&__m256_op1[5]) = 0x000000ff; ++ *((int *)&__m256_op1[4]) = 0x000000ff; ++ *((int *)&__m256_op1[3]) = 0x000000ff; ++ *((int *)&__m256_op1[2]) = 0x000000ff; ++ *((int *)&__m256_op1[1]) = 0x000000ff; ++ *((int *)&__m256_op1[0]) = 0x000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfe01fe01; ++ *((int *)&__m256_op0[6]) = 0x7e81fd02; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x3fc001fe; ++ *((int *)&__m256_op0[3]) = 0xfe01fe01; ++ *((int *)&__m256_op0[2]) = 0x7e81fd02; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x3fc001fe; ++ *((int *)&__m256_op1[7]) = 0xfe01fe01; ++ *((int *)&__m256_op1[6]) = 0x7e81fd02; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x3fc001fe; ++ *((int *)&__m256_op1[3]) = 0xfe01fe01; ++ *((int *)&__m256_op1[2]) = 0x7e81fd02; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x3fc001fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int 
*)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0ff80100ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0ff80100ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000017000000080; ++ *((unsigned long *)&__m256d_op1[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000017000000080; ++ *((unsigned long *)&__m256d_op1[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x01480000052801a2; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffdcff64; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff000100000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00aa00ab00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00aa00ab00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffe37fe3001d001d; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffe37fe3001d001d; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000104000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000104000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfe01fe017e81fd02; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000003fc001fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfe01fe017e81fd02; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000003fc001fe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000b004a00440040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8020004a0011002a; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffefffefffeffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffefffefffeffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, 
__m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000860601934; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000860601934; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256d_op1[2]) = 0x4079808280057efe; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x007ffcfcfd020202; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000400000004; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++ ++ return 0; ++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c +new file mode 100644 +index 000000000..9d015a5c8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c +@@ -0,0 +1,756 @@
++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++
unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x80808080; ++ *((int *)&__m256_op0[6]) = 0x80808080; ++ *((int *)&__m256_op0[5]) = 0x80808080; ++ *((int *)&__m256_op0[4]) = 0x80808080; ++ *((int *)&__m256_op0[3]) = 0x80808080; ++ *((int *)&__m256_op0[2]) = 0x80808080; ++ *((int *)&__m256_op0[1]) = 0x80808080; ++ *((int *)&__m256_op0[0]) = 0x80808080; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xefefefef; ++ *((int *)&__m256_op0[6]) = 0xefefefef; ++ *((int *)&__m256_op0[5]) = 0xefefefef; ++ *((int *)&__m256_op0[4]) = 0xefefefef; ++ *((int *)&__m256_op0[3]) = 0xefefefef; ++ *((int *)&__m256_op0[2]) = 0xefefef6e; ++ *((int *)&__m256_op0[1]) = 0xeeeeeeee; ++ *((int *)&__m256_op0[0]) = 0xeeeeeeee; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7f800000; ++ *((int *)&__m256_op0[6]) = 0x7f800000; ++ *((int *)&__m256_op0[5]) = 0x7f800000; ++ *((int *)&__m256_op0[4]) = 0x7f800000; ++ *((int *)&__m256_op0[3]) = 0x7f800000; ++ *((int *)&__m256_op0[2]) = 0x7f800000; ++ *((int *)&__m256_op0[1]) = 0x7f800000; ++ *((int *)&__m256_op0[0]) = 0x7f800000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 
0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0x00ff00ff; ++ *((int *)&__m256_op1[6]) = 0x00ff00ff; ++ *((int *)&__m256_op1[5]) = 0x00ff00ff; ++ *((int *)&__m256_op1[4]) = 0x00ff00ff; ++ *((int *)&__m256_op1[3]) = 0x00ff00ff; ++ *((int *)&__m256_op1[2]) = 0x00ff00ff; ++ *((int *)&__m256_op1[1]) = 0x00ff00ff; ++ *((int *)&__m256_op1[0]) = 0x00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x80000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x40404040; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x40404040; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfefefefe; ++ *((int *)&__m256_op1[4]) = 0x3f800000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xfefefefe; ++ *((int *)&__m256_op1[0]) = 0x3f800000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff0101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x55555501; ++ *((int *)&__m256_op0[4]) = 0xfefefeab; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x55555501; ++ *((int *)&__m256_op0[0]) = 0xfefefeab; ++ *((int *)&__m256_op1[7]) = 0x0010bfc8; ++ *((int *)&__m256_op1[6]) = 0x0010bf52; ++ *((int *)&__m256_op1[5]) = 0xfff1bfca; ++ *((int *)&__m256_op1[4]) = 0x0011bfcb; ++ *((int *)&__m256_op1[3]) = 0x0010bfc8; ++ *((int *)&__m256_op1[2]) = 0x0010bf52; ++ *((int *)&__m256_op1[1]) = 0xfff1bfca; ++ *((int *)&__m256_op1[0]) = 0x0011bfcb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) 
= 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x80008000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80008000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x80008000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80008000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00060000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00060000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000166; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000166; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000004a; ++ *((int *)&__m256_op0[6]) = 0x557baac4; ++ *((int *)&__m256_op0[5]) = 0x556caad9; ++ *((int *)&__m256_op0[4]) = 0xaabbaa88; ++ *((int *)&__m256_op0[3]) = 0x0000004a; ++ *((int *)&__m256_op0[2]) = 0x557baac4; ++ *((int *)&__m256_op0[1]) = 0x556caad9; ++ *((int *)&__m256_op0[0]) = 0xaabbaa88; ++ *((int *)&__m256_op1[7]) = 0x09090909; ++ *((int *)&__m256_op1[6]) = 0x09090909; ++ *((int *)&__m256_op1[5]) = 0x09090909; ++ *((int *)&__m256_op1[4]) = 0x09090909; ++ *((int *)&__m256_op1[3]) = 0x09090909; ++ *((int *)&__m256_op1[2]) = 0x09090909; ++ *((int *)&__m256_op1[1]) = 0x09090909; ++ *((int *)&__m256_op1[0]) = 0x09090909; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int 
*)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x80000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x80000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000020; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000020; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op0[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op0[1]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op0[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256d_op1[3]) = 0x88888a6d0962002e; ++ *((unsigned long *)&__m256d_op1[2]) = 0xdb8a3109fe0f0020; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000007fff01fffb; ++ *((unsigned long *)&__m256d_op1[0]) = 0xdb8e20990cce025a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256d_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256d_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffebeb8; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffebeb8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfafafafafafafafa; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000fefefe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffa80000ff31; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 
0x00000000000a0008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c +new file mode 100644 +index 000000000..a61681073 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c +@@ -0,0 +1,438 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int 
*)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x80000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 
0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x000000ff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x000000ff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000064; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000781; ++ *((int *)&__m256_op0[0]) = 0x00000064; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0c6a2400; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0f002040; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x0c6a2400; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0f002040; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x0000000c; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) 
= 0x0000000c; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0feff00000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0feff00000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000005; ++ *((unsigned 
long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfefefefeffe0e0e0; ++ *((unsigned long *)&__m256d_op0[1]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfefefefeffe0e0e0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000040004000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c +new file mode 100644 +index 000000000..41f274920 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c +@@ -0,0 +1,363 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x1e180000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x1e180000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x1e180000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x1e180000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00802000; ++ *((int *)&__m256_op1[6]) = 0x00802000; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00802000; ++ *((int *)&__m256_op1[2]) = 0x00802000; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000055; ++ *((int *)&__m256_op0[6]) = 0x36aaaaac; ++ *((int *)&__m256_op0[5]) = 0x55555555; ++ *((int *)&__m256_op0[4]) = 0xaaaaaaac; ++ *((int *)&__m256_op0[3]) = 0x00000055; ++ *((int *)&__m256_op0[2]) = 0x36aaaaac; ++ *((int *)&__m256_op0[1]) = 0x55555555; ++ *((int *)&__m256_op0[0]) = 0xaaaaaaac; ++ *((int *)&__m256_op1[7]) = 0x00060000; ++ *((int *)&__m256_op1[6]) = 0x00040000; ++ *((int *)&__m256_op1[5]) = 0x00025555; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00060000; ++ *((int *)&__m256_op1[2]) = 0x00040000; ++ *((int *)&__m256_op1[1]) = 0x00025555; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xff240000; ++ *((int *)&__m256_op0[6]) = 0x0000ff00; ++ *((int *)&__m256_op0[5]) = 0xfffeffe4; ++ *((int *)&__m256_op0[4]) = 0xfffeff00; ++ *((int *)&__m256_op0[3]) = 0xff640000; ++ *((int *)&__m256_op0[2]) = 0x0000ff00; ++ *((int *)&__m256_op0[1]) = 0xfffeff66; ++ *((int *)&__m256_op0[0]) = 0xfffeff00; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80808082; ++ *((int *)&__m256_op0[6]) = 0x80808082; ++ *((int *)&__m256_op0[5]) = 0x80808082; ++ *((int *)&__m256_op0[4]) = 0x80808082; ++ *((int *)&__m256_op0[3]) = 0x80808082; ++ *((int *)&__m256_op0[2]) = 0x80808080; ++ *((int *)&__m256_op0[1]) = 0x80808082; ++ *((int *)&__m256_op0[0]) = 0x80808082; ++ *((int *)&__m256_op1[7]) = 0x55555555; ++ *((int *)&__m256_op1[6]) = 0x55555555; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x55555555; ++ *((int *)&__m256_op1[2]) = 0x55555555; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 
0x6d6d6d6d; ++ *((int *)&__m256_op0[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[0]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[7]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[0]) = 0x6d6d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int 
*)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000118; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000027; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c +new file mode 100644 +index 000000000..116399a7c +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c +@@ -0,0 +1,528 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000003; ++ *((int *)&__m256_op1[6]) = 0x0000000c; ++ *((int *)&__m256_op1[5]) = 0x00000011; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000005; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000008; ++ *((int *)&__m256_op1[0]) = 0x00000010; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[0]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[7]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op1[0]) = 0x6d6d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[0]) = 0x7c007c007c007c00; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 
0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00020000; ++ *((int *)&__m256_op1[6]) = 0x00020000; ++ *((int *)&__m256_op1[5]) = 0x00020000; ++ *((int *)&__m256_op1[4]) = 0x00010000; ++ *((int *)&__m256_op1[3]) = 0x00020000; ++ *((int *)&__m256_op1[2]) = 0x00020000; ++ *((int *)&__m256_op1[1]) = 0x00020000; ++ *((int *)&__m256_op1[0]) = 0x00010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x71717171; ++ *((int *)&__m256_op1[6]) = 0x71010101; ++ *((int *)&__m256_op1[5]) = 0x8e8e8e8e; ++ *((int *)&__m256_op1[4]) = 0x8f00ffff; ++ *((int *)&__m256_op1[3]) = 0x71717171; ++ *((int *)&__m256_op1[2]) = 0x71010101; ++ *((int *)&__m256_op1[1]) = 0x8e8e8e8e; ++ *((int *)&__m256_op1[0]) = 0x8f00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7c007c0080008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7c007c0080008000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfff10000; ++ *((int *)&__m256_op0[4]) = 0xfff10000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfff10000; ++ *((int *)&__m256_op0[0]) = 0xfff10000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int 
*)&__m256_op1[5]) = 0xfff10000; ++ *((int *)&__m256_op1[4]) = 0xfff10000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xfff10000; ++ *((int *)&__m256_op1[0]) = 0xfff10000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff88ff88; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00040000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00040000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xff00ff00; ++ *((int *)&__m256_op0[6]) = 0x3f003f00; ++ *((int *)&__m256_op0[5]) = 0xff0101fd; ++ *((int *)&__m256_op0[4]) = 0x00010100; ++ *((int *)&__m256_op0[3]) = 0xff00ff00; ++ *((int *)&__m256_op0[2]) = 0x3f003f00; ++ *((int *)&__m256_op0[1]) = 0xff0101fd; ++ *((int *)&__m256_op0[0]) = 0x00010100; ++ *((int *)&__m256_op1[7]) = 0x01ffff43; ++ *((int *)&__m256_op1[6]) = 0x00fffeff; ++ *((int *)&__m256_op1[5]) = 0xfe0000bc; ++ *((int *)&__m256_op1[4]) = 0xff000100; ++ *((int *)&__m256_op1[3]) = 0x01ffff43; ++ *((int *)&__m256_op1[2]) = 0x00fffeff; ++ *((int *)&__m256_op1[1]) = 0xfe0000bc; ++ *((int *)&__m256_op1[0]) = 0xff000100; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fc00fc00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fc00fc00; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0cc08723ff900001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xcc9b89f2f6cef440; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xfffffff8; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xfffffff8; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff800000ff800000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00003f784000ff80; ++ *((unsigned long *)&__m256d_op1[1]) = 0xf7f8f7f84000fff9; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00003f784000ff80; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000555500005555; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0000555500005555; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000555500005555; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000555500005555; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffb6804cb9; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffb7bbdec0; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffb680489b; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffb7bc02a0; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xfffffffd; ++ *((int *)&__m256_result[4]) = 0xfffffffd; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xfffffffd; ++ *((int *)&__m256_result[0]) = 0xfffffffd; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010202020203; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101010201010102; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010202020203; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010201010102; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x3fff3fff3fff3fc4; ++ *((unsigned long *)&__m256d_op1[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x3fff3fff3fff3fc4; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x3ff9fffa; ++ *((int *)&__m256_result[4]) = 0x3ff9fffa; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int 
*)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x3ff9fffa; ++ *((int *)&__m256_result[0]) = 0x3ff9fffa; ++ __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c +new file mode 100644 +index 000000000..001ce1c69 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c +@@ -0,0 +1,485 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0000aaaa; ++ *((int *)&__m256_op0[6]) = 0x00008bfe; ++ *((int *)&__m256_op0[5]) = 0x0000aaaa; ++ *((int *)&__m256_op0[4]) = 0x0000aaaa; ++ *((int *)&__m256_op0[3]) = 0x0000aaaa; ++ *((int *)&__m256_op0[2]) = 0x00008bfe; ++ *((int *)&__m256_op0[1]) = 0x0000aaaa; ++ *((int *)&__m256_op0[0]) = 0x0000aaaa; ++ *((unsigned long *)&__m256d_result[3]) = 0x3795554000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x37917fc000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x3795554000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x37917fc000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0404010008080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408010008080808; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0404010008080808; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0408010008080808; ++ *((int *)&__m256_result[7]) = 0x38808000; ++ *((int *)&__m256_result[6]) = 0x37800000; ++ *((int *)&__m256_result[5]) = 0x39010000; ++ *((int *)&__m256_result[4]) = 0x39010000; ++ *((int *)&__m256_result[3]) = 0x38808000; ++ *((int *)&__m256_result[2]) = 0x37800000; ++ *((int *)&__m256_result[1]) = 0x39010000; ++ *((int *)&__m256_result[0]) = 0x39010000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int 
*)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000100010001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000100010001fffe; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvth_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00020006; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00020006; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00020006; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00020006; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x37b0003000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x37b0003000000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffff0; ++ *((int *)&__m256_op0[6]) = 0xfffffff0; ++ *((int *)&__m256_op0[5]) = 0xfffffff0; ++ *((int *)&__m256_op0[4]) = 0xfffffff0; ++ *((int *)&__m256_op0[3]) = 0xfffffff0; ++ *((int *)&__m256_op0[2]) = 0xfffffff0; ++ *((int *)&__m256_op0[1]) = 0xfffffff0; ++ *((int *)&__m256_op0[0]) = 0xfffffff0; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfffffffe00000000; ++ __m256d_out = __lasx_xvfcvth_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 
0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000; ++ *((int *)&__m256_result[7]) = 0xc6000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xc6000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00010000002fff9e; ++ *((int *)&__m256_result[7]) = 
0x34000000; ++ *((int *)&__m256_result[6]) = 0xfff00000; ++ *((int *)&__m256_result[5]) = 0xfff6e000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x33800000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x363c0000; ++ *((int *)&__m256_result[0]) = 0xfff3c000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x80000000; ++ *((int *)&__m256_op0[5]) = 0x80000000; ++ *((int *)&__m256_op0[4]) = 0xff800000; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x80000000; ++ *((int *)&__m256_op0[1]) = 0x80000000; ++ *((int 
*)&__m256_op0[0]) = 0xff800000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc1d75053f0000000; ++ *((int *)&__m256_result[7]) = 0xc03ae000; ++ *((int *)&__m256_result[6]) = 0x420a6000; ++ *((int *)&__m256_result[5]) = 0xc6000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xc03ae000; ++ *((int *)&__m256_result[2]) = 0x420a6000; ++ *((int *)&__m256_result[1]) = 0xc6000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x03802fc000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x03802fc000000000; ++ *((int *)&__m256_result[7]) = 0x38600000; ++ *((int *)&__m256_result[6]) = 0x3df80000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x38600000; ++ *((int *)&__m256_result[2]) = 0x3df80000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 
0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c +new file mode 100644 +index 000000000..dd04fd788 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c +@@ -0,0 +1,375 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xbff0000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001700080; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001700080; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x4177000800000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x4177000800000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc1f0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xc1f0000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256d_result[3]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256d_result[2]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256d_result[1]) = 0x437fe01fe01fe020; ++ *((unsigned long *)&__m256d_result[0]) = 0x437fe01fe01fe020; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x4393a0a5bc606060; ++ *((unsigned long *)&__m256d_result[2]) = 0x43b32feea9000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x4393a0a5bc606060; ++ *((unsigned long *)&__m256d_result[0]) = 0x43b32feea9000000; ++ __m256d_out = __lasx_xvffint_d_l (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((int *)&__m256_result[7]) = 0x4e5cba76; ++ *((int *)&__m256_result[6]) = 0xcdbaaa78; ++ *((int *)&__m256_result[5]) = 0xce68fdeb; ++ *((int *)&__m256_result[4]) = 0x4e33eaff; ++ *((int *)&__m256_result[3]) = 0x4e45cc2d; ++ *((int *)&__m256_result[2]) = 0xcda41b30; ++ *((int *)&__m256_result[1]) = 0x4ccb1e5c; ++ *((int *)&__m256_result[0]) = 0x4d6b21e4; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; 
++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x4efffe00; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x47000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x4efffe00; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x47000000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff00; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x477f0000; 
++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x477f0000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000030000; ++ *((int *)&__m256_result[7]) = 0x49800080; ++ *((int *)&__m256_result[6]) = 0x48400000; ++ *((int *)&__m256_result[5]) = 0x49800080; ++ *((int *)&__m256_result[4]) = 0x48400000; ++ *((int *)&__m256_result[3]) = 0x49800080; ++ *((int *)&__m256_result[2]) = 0x48400000; ++ *((int *)&__m256_result[1]) = 0x49800080; ++ *((int *)&__m256_result[0]) = 0x48400000; ++ __m256_out = __lasx_xvffint_s_w (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x4f800000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc74180000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff884580000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0xbf800000; ++ *((int *)&__m256_result[6]) = 0xbf800000; ++ *((int *)&__m256_result[5]) = 
0xd662fa00; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xbf800000; ++ *((int *)&__m256_result[2]) = 0xbf800000; ++ *((int *)&__m256_result[1]) = 0xd6ef7500; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((int *)&__m256_result[7]) = 0xdf000000; ++ *((int *)&__m256_result[6]) = 0x52a00000; ++ *((int *)&__m256_result[5]) = 0x5b7f00ff; ++ *((int *)&__m256_result[4]) = 0x5b7f00ff; ++ *((int *)&__m256_result[3]) = 0xdf000000; ++ *((int *)&__m256_result[2]) = 0x52a00000; ++ *((int *)&__m256_result[1]) = 0x5b7f00ff; ++ *((int *)&__m256_result[0]) = 0x5b7f00ff; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x5d20a0a1; ++ *((int *)&__m256_result[6]) = 0x5d20a0a1; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x5d20a0a1; ++ *((int *)&__m256_result[2]) = 0x5d20a0a1; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c +new file mode 100644 +index 000000000..3e2b15507 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c +@@ -0,0 +1,246 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x4370100000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x4370100000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256d_result[3]) = 0x43c0101010101010; ++ *((unsigned long *)&__m256d_result[2]) = 0x43c0101010101032; ++ *((unsigned long *)&__m256d_result[1]) = 0x43c0101010101010; ++ *((unsigned long *)&__m256d_result[0]) = 0x43c0101010101032; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40efffe09fa88260; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6b07ca8e013fbf01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40efffe09fa7e358; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80ce32be3e827f00; ++ *((unsigned long *)&__m256d_result[3]) = 0x43d03bfff827ea21; ++ *((unsigned long 
*)&__m256d_result[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256d_result[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256d_result[0]) = 0x43e019c657c7d050; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x43f0000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41f0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x41f0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41f0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41f0000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256d_result[3]) = 0x4380100810101008; ++ *((unsigned long *)&__m256d_result[2]) = 0x4380100810101008; ++ *((unsigned long *)&__m256d_result[1]) = 0x4380100810101008; ++ *((unsigned long *)&__m256d_result[0]) = 0x4380100810101008; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41f0000000000000; ++ __m256d_out = __lasx_xvffint_d_lu (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffbf7f00007fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffe651ffffbfff; ++ *((int *)&__m256_result[7]) = 0x4f800000; ++ *((int *)&__m256_result[6]) = 0x4f800000; ++ *((int *)&__m256_result[5]) = 0x4f7fffbf; ++ *((int *)&__m256_result[4]) = 0x46fffe00; ++ *((int *)&__m256_result[3]) = 0x4f800000; ++ *((int *)&__m256_result[2]) = 0x4f800000; ++ *((int *)&__m256_result[1]) = 0x4f7fffe6; ++ *((int *)&__m256_result[0]) = 0x4f7fffc0; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((int *)&__m256_result[7]) = 0x4b808080; ++ *((int *)&__m256_result[6]) = 0x4b808080; ++ *((int *)&__m256_result[5]) = 0x4f800000; ++ *((int *)&__m256_result[4]) = 0x4f7fffff; ++ *((int *)&__m256_result[3]) = 0x4b808080; ++ *((int *)&__m256_result[2]) = 0x4b808080; ++ *((int *)&__m256_result[1]) = 0x4f800000; ++ *((int *)&__m256_result[0]) = 0x4f800000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int 
*)&__m256_result[6]) = 0x41000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x41000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x41000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x41000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000020; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x42800000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x42000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x42800000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x42000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvffint_s_wu (__m256i_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c +new file mode 100644 +index 000000000..e310ff5ee +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c +@@ -0,0 +1,262 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0e2d5626ff75cdbc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5db4b156e2002a78; ++ *((unsigned long *)&__m256i_op0[1]) = 0xeeffbeb03ba3e6b0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0c16e25eb28d27ea; ++ *((unsigned long *)&__m256d_result[3]) = 0x41ac5aac4c000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc161464880000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xc1b1004150000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41cdd1f358000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x8000006f0000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000006f0000007f; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256d_result[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256d_result[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41d8585858400000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41dfffc000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41dfffdfffc00000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f3a40; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, 
__m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffb79fb74; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc192181230000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xc192181230000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xbff0000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ffffffff00; ++ *((unsigned long *)&__m256d_result[3]) = 0x40efffe000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x40efffe000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41dffc0000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41dffc0000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256d_result[3]) = 0xc039000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc039000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xc039000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 
0xc039000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffinth_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41d6600000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256d_result[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256d_result[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256d_result[0]) = 0xc1d75053f0000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x403f000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x403f000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256d_result[3]) = 0x416ee00000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x416ee000c0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x416ee00000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x416ee000c0000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff000000000080; ++ *((unsigned long *)&__m256d_result[3]) = 0x416fe00000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x4060000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x416fe00000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x4060000000000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x41cfe01dde000000; ++ __m256d_out = __lasx_xvffintl_d_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c +new file mode 100644 +index 000000000..bba1a06f3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c +@@ -0,0 +1,86 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xc08f780000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256d_result[1]) = 0xc08f780000000000; ++ *((unsigned long 
*)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvflogb_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c +new file mode 100644 +index 000000000..b641c733f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c +@@ -0,0 +1,115 @@ ++/* { dg-do run } */ ++/* { dg-options 
"-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x10101010; ++ *((int *)&__m256_op0[6]) = 0x10101012; ++ *((int *)&__m256_op0[5]) = 0x10101010; ++ *((int *)&__m256_op0[4]) = 0x10101012; ++ *((int *)&__m256_op0[3]) = 0x10101010; ++ *((int *)&__m256_op0[2]) = 0x10101093; ++ *((int *)&__m256_op0[1]) = 0x11111111; ++ *((int *)&__m256_op0[0]) = 0x11111113; ++ *((int *)&__m256_result[7]) = 0xc2be0000; ++ *((int *)&__m256_result[6]) = 0xc2be0000; ++ *((int *)&__m256_result[5]) = 0xc2be0000; ++ *((int *)&__m256_result[4]) = 0xc2be0000; ++ *((int *)&__m256_result[3]) = 0xc2be0000; ++ *((int *)&__m256_result[2]) = 0xc2be0000; ++ *((int *)&__m256_result[1]) = 0xc2ba0000; ++ *((int *)&__m256_result[0]) = 0xc2ba0000; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0xff800000; ++ *((int *)&__m256_result[4]) = 
0xff800000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xff800000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000087; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000087; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xff800000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0xc30e0000; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0xff800000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0xc30e0000; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvflogb_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c +new file mode 100644 +index 000000000..c85c94bf6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c +@@ -0,0 +1,382 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xe37affb42fc05f69; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x65fb66c81da8e5ba; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256d_op2[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256d_op2[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256d_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256d_result[0]) = 0xe3aebaf4df958004; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x00020001ffb6ffe0; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0049004200000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xbf28b0686066be60; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0xc5c5c5c5c5c5c5c5; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2); ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op1[0]) = 0x00000000000f1a40; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000aaaa0000aaaa; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0202810102020202; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0202810102020202; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x00007fff00000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x000000000000ffff; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256d_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256d_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000100010001; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffff000000; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256d_op2[2]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256d_op2[1]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256d_op2[0]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_op2[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff5f5c; ++ __m256d_out = 
__lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000007380; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000f1c00; ++ *((unsigned long *)&__m256d_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256d_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op2[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256d_result[3]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x80000000ffff8c80; ++ *((unsigned long *)&__m256d_result[1]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x80000000fff0e400; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x80000000000001dc; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x80000000000001dc; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256d_op1[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256d_op2[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256d_op2[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256d_op2[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256d_op2[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256d_result[2]) = 0x80003fc00000428a; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256d_result[0]) = 0x80003fc00000428a; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op2[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000100000001; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000100000001; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffb2f600006f48; ++ __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c +new file mode 100644 +index 000000000..bde41dd5c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c +@@ -0,0 +1,720 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 
0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xf328dfff; ++ *((int *)&__m256_op1[1]) = 0x6651bfff; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x0000ffff; ++ *((int *)&__m256_op2[6]) = 0x0000ff80; ++ *((int *)&__m256_op2[5]) = 0x00004686; ++ *((int *)&__m256_op2[4]) = 0x00007f79; ++ *((int *)&__m256_op2[3]) = 0x0000ffff; ++ *((int *)&__m256_op2[2]) = 0x0000ffff; ++ *((int *)&__m256_op2[1]) = 0x0000f328; ++ *((int *)&__m256_op2[0]) = 0x0000dfff; ++ *((int *)&__m256_result[7]) = 0x0000ffff; ++ *((int *)&__m256_result[6]) = 0x0000ff80; ++ *((int *)&__m256_result[5]) = 0x00004686; ++ *((int *)&__m256_result[4]) = 0x00007f79; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0x0000ffff; ++ *((int *)&__m256_result[1]) = 0x0000f328; ++ *((int *)&__m256_result[0]) = 0x0000dfff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfff10000; ++ *((int *)&__m256_op0[4]) = 0xfff10000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfff10000; ++ *((int *)&__m256_op0[0]) = 0xfff10000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ 
*((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xfff10000; ++ *((int *)&__m256_result[4]) = 0xfff10000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xfff10000; ++ *((int *)&__m256_result[0]) = 0xfff10000; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x803f6004; ++ *((int *)&__m256_op2[4]) = 0x1f636003; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x803f6004; ++ *((int *)&__m256_op2[0]) = 0x1f636003; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x803f6004; ++ *((int *)&__m256_result[4]) = 0x1f636003; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x803f6004; ++ *((int *)&__m256_result[0]) = 0x1f636003; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, 
__m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffb3430a; ++ *((int *)&__m256_op0[4]) = 0x006ed8b8; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffb3430a; ++ *((int *)&__m256_op0[0]) = 0x006ed8b8; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x000001ff; ++ *((int *)&__m256_op1[4]) = 0x000003fe; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x000001ff; ++ *((int *)&__m256_op1[0]) = 0x000003fe; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x000000ff; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x000000ff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xfff3430a; ++ *((int *)&__m256_result[4]) = 0x000000ff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xfff3430a; ++ *((int *)&__m256_result[0]) = 0x000000ff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffeb683; ++ *((int *)&__m256_op0[6]) = 0x9ffffd80; ++ *((int *)&__m256_op0[5]) = 0xfffe97c0; ++ *((int *)&__m256_op0[4]) = 0x20010001; ++ *((int *)&__m256_op0[3]) = 0xfffeb683; ++ *((int *)&__m256_op0[2]) = 0x9ffffd80; ++ *((int *)&__m256_op0[1]) = 0xfffe97c0; ++ *((int *)&__m256_op0[0]) = 0x20010001; ++ *((int *)&__m256_op1[7]) = 0x00009fff; ++ *((int *)&__m256_op1[6]) = 0x9ffffd80; ++ *((int *)&__m256_op1[5]) = 0x0000ffff; ++ *((int *)&__m256_op1[4]) = 0x20010001; ++ *((int *)&__m256_op1[3]) = 0x00009fff; ++ *((int *)&__m256_op1[2]) = 0x9ffffd80; ++ *((int *)&__m256_op1[1]) = 0x0000ffff; ++ *((int *)&__m256_op1[0]) = 0x20010001; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00002080; ++ *((int *)&__m256_op2[4]) = 0xdf5b41cf; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00002080; ++ *((int *)&__m256_op2[0]) = 0xdf5b41cf; ++ *((int *)&__m256_result[7]) = 0xfffeb683; ++ *((int *)&__m256_result[6]) = 0x007ffd80; ++ *((int *)&__m256_result[5]) = 0xfffe97c0; ++ *((int *)&__m256_result[4]) = 0xdf5b41cf; ++ *((int *)&__m256_result[3]) = 0xfffeb683; ++ *((int *)&__m256_result[2]) = 0x007ffd80; ++ *((int *)&__m256_result[1]) = 0xfffe97c0; ++ *((int *)&__m256_result[0]) = 0xdf5b41cf; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xfffeb664; ++ *((int *)&__m256_op1[6]) = 0x007ffd61; ++ *((int *)&__m256_op1[5]) = 0xfffe97a1; ++ *((int *)&__m256_op1[4]) = 0xdf5b41b0; ++ *((int *)&__m256_op1[3]) = 0xfffeb664; 
++ *((int *)&__m256_op1[2]) = 0x007ffd61; ++ *((int *)&__m256_op1[1]) = 0xfffe97a1; ++ *((int *)&__m256_op1[0]) = 0xdf5b41b0; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x94d7fb52; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xfffeb664; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xfffe97a1; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xfffeb664; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xfffe97a1; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int 
*)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xb70036db; ++ *((int *)&__m256_op1[6]) = 0x12c4007e; ++ *((int *)&__m256_op1[5]) = 0xb7146213; ++ *((int *)&__m256_op1[4]) = 0xfc1e0049; ++ *((int *)&__m256_op1[3]) = 0x000000fe; ++ *((int *)&__m256_op1[2]) = 0xfe02fffe; ++ *((int *)&__m256_op1[1]) = 0xb71c413b; ++ *((int *)&__m256_op1[0]) = 0x199d04b5; ++ *((int *)&__m256_op2[7]) = 0xb70036db; ++ *((int *)&__m256_op2[6]) = 0x12c4007e; ++ *((int *)&__m256_op2[5]) = 0xb7146213; ++ *((int *)&__m256_op2[4]) = 0xfc1e0049; ++ *((int *)&__m256_op2[3]) = 0x000000fe; ++ *((int *)&__m256_op2[2]) = 0xfe02fffe; ++ *((int *)&__m256_op2[1]) = 0xb71c413b; ++ *((int *)&__m256_op2[0]) = 0x199d04b5; ++ *((int *)&__m256_result[7]) = 0x370036db; ++ *((int *)&__m256_result[6]) = 0x92c4007e; ++ *((int *)&__m256_result[5]) = 0x37146213; ++ *((int *)&__m256_result[4]) = 0x7c1e0049; ++ *((int *)&__m256_result[3]) = 0x800000fe; ++ *((int *)&__m256_result[2]) = 0x7e02fffe; ++ *((int *)&__m256_result[1]) = 0x371c413b; ++ *((int *)&__m256_result[0]) = 0x999d04b5; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ 
*((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x3f7f7f7e; ++ *((int *)&__m256_op1[4]) = 0xff800000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x3f7f7f7e; ++ *((int *)&__m256_op1[0]) = 0xff800000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x7fffffff; ++ *((int *)&__m256_op2[4]) = 0xff7fffff; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x7fffffff; ++ *((int *)&__m256_op2[0]) = 0xff7fffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x7fffffff; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x7fffffff; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffafaf; ++ *((int *)&__m256_op0[4]) = 0xb3b3dc9d; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffafaf; ++ *((int *)&__m256_op0[0]) = 0xb3b3dc9d; ++ *((int *)&__m256_op1[7]) = 0x00020000; ++ *((int *)&__m256_op1[6]) = 0x00020000; ++ *((int *)&__m256_op1[5]) = 0x00220021; ++ *((int *)&__m256_op1[4]) = 0x004a007e; ++ *((int *)&__m256_op1[3]) = 0x00020000; ++ *((int *)&__m256_op1[2]) = 0x00020000; ++ *((int *)&__m256_op1[1]) = 0x00220021; ++ *((int *)&__m256_op1[0]) = 0x004a007e; ++ *((int *)&__m256_op2[7]) = 0x00000001; ++ *((int 
*)&__m256_op2[6]) = 0x00007f7f; ++ *((int *)&__m256_op2[5]) = 0x00000001; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000001; ++ *((int *)&__m256_op2[2]) = 0x00007f7f; ++ *((int *)&__m256_op2[1]) = 0x00000001; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000001; ++ *((int *)&__m256_result[6]) = 0x80007f7f; ++ *((int *)&__m256_result[5]) = 0xffffafaf; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000001; ++ *((int *)&__m256_result[2]) = 0x80007f7f; ++ *((int *)&__m256_result[1]) = 0xffffafaf; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0xffffffe5; ++ *((int *)&__m256_op2[6]) = 0xffffffe5; ++ *((int *)&__m256_op2[5]) = 0xffffffe5; ++ *((int *)&__m256_op2[4]) = 0xffffffe5; ++ *((int *)&__m256_op2[3]) = 0xffffffe5; ++ *((int *)&__m256_op2[2]) = 0xffffffe5; ++ *((int *)&__m256_op2[1]) = 0xffffffe5; ++ *((int *)&__m256_op2[0]) = 0xffffffe5; ++ *((int *)&__m256_result[7]) = 0xffffffe5; ++ *((int *)&__m256_result[6]) = 0xffffffe5; ++ *((int *)&__m256_result[5]) = 0xffffffe5; ++ *((int *)&__m256_result[4]) = 0xffffffe5; ++ *((int *)&__m256_result[3]) = 0xffffffe5; ++ *((int *)&__m256_result[2]) = 0xffffffe5; ++ *((int *)&__m256_result[1]) = 0xffffffe5; ++ *((int *)&__m256_result[0]) = 0xffffffe5; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xbfffffff; ++ *((int *)&__m256_op0[6]) = 0xffff8000; ++ *((int *)&__m256_op0[5]) = 0xbfff8000; ++ *((int *)&__m256_op0[4]) = 0x80000000; ++ *((int *)&__m256_op0[3]) = 0xbfffffff; ++ *((int *)&__m256_op0[2]) = 0xffff8000; ++ *((int *)&__m256_op0[1]) = 0xbfff8000; ++ *((int *)&__m256_op0[0]) = 0x80000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0xffff8000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0xffff8000; ++ 
*((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x02020102; ++ *((int *)&__m256_op1[6]) = 0x02020102; ++ *((int *)&__m256_op1[5]) = 0x02020102; ++ *((int *)&__m256_op1[4]) = 0x02020102; ++ *((int *)&__m256_op1[3]) = 0x02020102; ++ *((int *)&__m256_op1[2]) = 0x02020102; ++ *((int *)&__m256_op1[1]) = 0x02020102; ++ *((int *)&__m256_op1[0]) = 0x02020102; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000008; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000008; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000008; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000008; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000008; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000008; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000008; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000008; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000001; ++ *((int *)&__m256_op2[4]) = 0x00000001; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000001; ++ *((int *)&__m256_op2[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x80000001; ++ *((int *)&__m256_result[4]) = 0x80000001; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x80000001; ++ *((int *)&__m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000040; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000040; ++ *((int 
*)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x40404040; ++ *((int *)&__m256_op2[6]) = 0x40404040; ++ *((int *)&__m256_op2[5]) = 0x40404040; ++ *((int *)&__m256_op2[4]) = 0x40404040; ++ *((int *)&__m256_op2[3]) = 0x40404040; ++ *((int *)&__m256_op2[2]) = 0x40404040; ++ *((int *)&__m256_op2[1]) = 0x40404040; ++ *((int *)&__m256_op2[0]) = 0x40404040; ++ *((int *)&__m256_result[7]) = 0xc0404040; ++ *((int *)&__m256_result[6]) = 0xc0404040; ++ *((int *)&__m256_result[5]) = 0xc0404040; ++ *((int *)&__m256_result[4]) = 0xc0404040; ++ *((int *)&__m256_result[3]) = 0xc0404040; ++ *((int *)&__m256_result[2]) = 0xc0404040; ++ *((int *)&__m256_result[1]) = 0xc0404040; ++ *((int *)&__m256_result[0]) = 0xc0404040; ++ __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c +new file mode 100644 +index 000000000..207ba167f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c +@@ -0,0 +1,230 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op1[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256d_op1[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256d_op1[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256d_op1[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256d_result[3]) = 
0x0000000200000002; ++ *((unsigned long *)&__m256d_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256d_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000004290; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000002a96ba; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000004290; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000002a96ba; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101010100005400; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000004290; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000004290; ++ *((unsigned long *)&__m256d_result[0]) = 0x0101010100005400; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0101000101010001; ++ *((unsigned 
long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0101000101010001; ++ __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 
0x0101000101010001; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 
(__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c +new file mode 100644 +index 000000000..9b7703231 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00010101; ++ *((int *)&__m256_op1[6]) = 0x01010101; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00010100; ++ *((int *)&__m256_op1[1]) = 0x00010000; ++ *((int *)&__m256_op1[0]) = 0x01000100; ++ *((int *)&__m256_result[7]) = 0x00010101; ++ *((int *)&__m256_result[6]) = 0x01010101; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00010100; ++ *((int *)&__m256_result[1]) = 0x00010000; ++ *((int *)&__m256_result[0]) = 0x01000100; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x59800000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x59800000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x59800000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x59800000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ 
*((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00010001; ++ *((int *)&__m256_op1[6]) = 0x00010001; ++ *((int *)&__m256_op1[5]) = 0x00010001; ++ *((int *)&__m256_op1[4]) = 0x00010001; ++ *((int *)&__m256_op1[3]) = 0x00010001; ++ *((int *)&__m256_op1[2]) = 0x00010001; ++ *((int *)&__m256_op1[1]) = 0x00010001; ++ *((int *)&__m256_op1[0]) = 0x00010001; ++ *((int *)&__m256_result[7]) = 0x00010001; ++ *((int *)&__m256_result[6]) = 0x00010001; ++ *((int *)&__m256_result[5]) = 0x00010001; ++ *((int *)&__m256_result[4]) = 0x00010001; ++ *((int *)&__m256_result[3]) = 0x00010001; ++ *((int *)&__m256_result[2]) = 0x00010001; ++ *((int *)&__m256_result[1]) = 0x00010001; ++ *((int *)&__m256_result[0]) = 0x00010001; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fefffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x7fefffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 
0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x000000ff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x000000ff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00003fe0; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00003fe0; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00003fe0; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00003fe0; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0000000e; ++ *((int *)&__m256_op1[6]) = 0x0000000e; ++ *((int 
*)&__m256_op1[5]) = 0x0000000e; ++ *((int *)&__m256_op1[4]) = 0x0000000e; ++ *((int *)&__m256_op1[3]) = 0x0000000e; ++ *((int *)&__m256_op1[2]) = 0x0000000e; ++ *((int *)&__m256_op1[1]) = 0x0000000e; ++ *((int *)&__m256_op1[0]) = 0x0000000e; ++ *((int *)&__m256_result[7]) = 0x0000000e; ++ *((int *)&__m256_result[6]) = 0x0000000e; ++ *((int *)&__m256_result[5]) = 0x0000000e; ++ *((int *)&__m256_result[4]) = 0x0000000e; ++ *((int *)&__m256_result[3]) = 0x0000000e; ++ *((int *)&__m256_result[2]) = 0x0000000e; ++ *((int *)&__m256_result[1]) = 0x0000000e; ++ *((int *)&__m256_result[0]) = 0x0000000e; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffdbbbcf; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffb8579f; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffdbbbcf; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffb8579f; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xfff8579f; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xfff8579f; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x40404040; ++ *((int *)&__m256_op1[6]) = 0x40404040; ++ *((int *)&__m256_op1[5]) = 0x40404040; ++ *((int *)&__m256_op1[4]) = 0x40404040; ++ *((int *)&__m256_op1[3]) = 0x40404040; ++ *((int *)&__m256_op1[2]) = 0x40404040; ++ *((int *)&__m256_op1[1]) = 0x40404040; ++ *((int *)&__m256_op1[0]) = 0x40404040; ++ *((int *)&__m256_result[7]) = 0x40404040; ++ *((int *)&__m256_result[6]) = 0x40404040; ++ *((int *)&__m256_result[5]) = 0x40404040; ++ *((int *)&__m256_result[4]) = 0x40404040; ++ *((int *)&__m256_result[3]) = 0x40404040; ++ *((int *)&__m256_result[2]) = 0x40404040; ++ *((int *)&__m256_result[1]) = 0x40404040; ++ *((int *)&__m256_result[0]) = 0x40404040; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x0000006d; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0010006d; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x0000006d; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0010006d; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00080040; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00080040; ++ *((int *)&__m256_op1[3]) = 
0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00080040; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00080040; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00080040; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x0010006d; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00080040; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x0010006d; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x000002ff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x000002ff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x000002ff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x000002ff; ++ __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x7ff90000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x1ff60000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0xfffffffe; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0xfffffffe; ++ *((int *)&__m256_op1[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000001; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int 
*)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x0218ff78; ++ *((int *)&__m256_op1[6]) = 0xfc38fc38; ++ *((int *)&__m256_op1[5]) = 0xfc000000; ++ *((int *)&__m256_op1[4]) = 0x00000048; ++ *((int *)&__m256_op1[3]) = 0x0218ff78; ++ *((int *)&__m256_op1[2]) = 0xfc38fc38; ++ *((int *)&__m256_op1[1]) = 0xfc000000; ++ *((int *)&__m256_op1[0]) = 0x00000048; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 
0xfc38fc38; ++ *((int *)&__m256_result[5]) = 0xfc000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xfc38fc38; ++ *((int *)&__m256_result[1]) = 0xfc000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x000000f0; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x000000f0; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x000000f0; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x000000f0; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffe7; ++ *((int *)&__m256_op0[6]) = 0xffffffe7; ++ *((int *)&__m256_op0[5]) = 0xffffffe7; ++ *((int *)&__m256_op0[4]) = 0xffffffe7; ++ *((int *)&__m256_op0[3]) = 0xffffffe7; ++ *((int *)&__m256_op0[2]) = 0xffffffe7; ++ *((int *)&__m256_op0[1]) = 0xffffffe7; ++ *((int *)&__m256_op0[0]) = 0xffffffe7; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c +new file mode 100644 +index 000000000..96bbb942d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c +@@ -0,0 +1,230 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, 
long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000018; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000018; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000018; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000018; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0002000000010000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0002000000010000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000001; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op1[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op1[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x00000000000000ff; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7efefefe80ffffff; ++ 
*((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7efefefe80ffffff; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c +new file mode 100644 +index 000000000..c73a8a74a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c +@@ -0,0 +1,506 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00005555; ++ *((int *)&__m256_op1[6]) = 0x00005555; ++ *((int *)&__m256_op1[5]) = 0x000307ff; ++ *((int *)&__m256_op1[4]) = 0xfe72e815; ++ *((int *)&__m256_op1[3]) = 0x00005555; ++ *((int *)&__m256_op1[2]) = 0x00005555; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000015; ++ *((int *)&__m256_result[7]) = 0x00005555; ++ *((int *)&__m256_result[6]) = 0x00005555; ++ *((int *)&__m256_result[5]) = 0x000307ff; ++ *((int *)&__m256_result[4]) = 0xfe72e815; ++ *((int *)&__m256_result[3]) = 0x00005555; ++ *((int *)&__m256_result[2]) = 0x00005555; 
++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000015; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x000c0000; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00040000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00ff00ff; ++ *((int *)&__m256_result[6]) = 0x00ff00ff; ++ *((int *)&__m256_result[5]) = 0x00ff00ff; ++ *((int *)&__m256_result[4]) = 0x000c0000; ++ *((int *)&__m256_result[3]) = 0x00ff00ff; ++ *((int *)&__m256_result[2]) = 0x00ff00ff; ++ *((int *)&__m256_result[1]) = 0x00ff00ff; ++ *((int *)&__m256_result[0]) = 0x00040000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x000007ff; ++ *((int *)&__m256_op0[6]) = 0x000007ff; ++ *((int *)&__m256_op0[5]) = 0x000007ff; ++ *((int *)&__m256_op0[4]) = 0xfffff800; ++ *((int *)&__m256_op0[3]) = 0x000007ff; ++ *((int *)&__m256_op0[2]) = 0x000007ff; ++ *((int *)&__m256_op0[1]) = 0x000007ff; ++ *((int *)&__m256_op0[0]) = 0xfffff800; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x000007ff; ++ *((int *)&__m256_result[6]) = 0x000007ff; ++ *((int *)&__m256_result[5]) = 0x000007ff; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x000007ff; ++ *((int *)&__m256_result[2]) = 0x000007ff; ++ *((int *)&__m256_result[1]) = 0x000007ff; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = 
__lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x001f00e0; ++ *((int *)&__m256_op0[4]) = 0x1f1f1fff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x001f00e0; ++ *((int *)&__m256_op0[0]) = 0x1f1f1fff; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0x80000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0xff800000; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0x80000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0xff800000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000001; ++ *((int *)&__m256_result[5]) = 0x001f00e0; ++ *((int *)&__m256_result[4]) = 0xff800000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000001; ++ *((int *)&__m256_result[1]) = 0x001f00e0; ++ *((int *)&__m256_result[0]) = 0xff800000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000001; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00009fff; ++ *((int *)&__m256_op0[6]) = 0x00002001; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00009fff; ++ *((int *)&__m256_op0[2]) = 0x00002001; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0xfffeb683; ++ *((int *)&__m256_op1[6]) = 0x9ffffd80; ++ *((int *)&__m256_op1[5]) = 0xfffe97c0; ++ *((int *)&__m256_op1[4]) = 0x20010001; ++ *((int *)&__m256_op1[3]) = 0xfffeb683; ++ *((int *)&__m256_op1[2]) = 0x9ffffd80; ++ *((int *)&__m256_op1[1]) = 0xfffe97c0; ++ *((int *)&__m256_op1[0]) = 0x20010001; ++ *((int *)&__m256_result[7]) = 0x00009fff; ++ *((int *)&__m256_result[6]) = 0x9ffffd80; ++ *((int *)&__m256_result[5]) = 0x0000ffff; ++ *((int *)&__m256_result[4]) = 0x20010001; ++ *((int *)&__m256_result[3]) = 0x00009fff; ++ *((int *)&__m256_result[2]) = 0x9ffffd80; ++ *((int *)&__m256_result[1]) = 0x0000ffff; ++ *((int *)&__m256_result[0]) = 0x20010001; ++ __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int 
*)&__m256_op0[7]) = 0x00000170; ++ *((int *)&__m256_op0[6]) = 0x00000080; ++ *((int *)&__m256_op0[5]) = 0xc0650055; ++ *((int *)&__m256_op0[4]) = 0x0055ffab; ++ *((int *)&__m256_op0[3]) = 0x00000170; ++ *((int *)&__m256_op0[2]) = 0x00000080; ++ *((int *)&__m256_op0[1]) = 0xc0650055; ++ *((int *)&__m256_op0[0]) = 0x0055ffab; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffff0000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffff0000; ++ *((int *)&__m256_op1[7]) = 0xfffefffe; ++ *((int *)&__m256_op1[6]) = 0xfffefffe; ++ *((int *)&__m256_op1[5]) = 0xfffefffe; ++ *((int *)&__m256_op1[4]) = 0xfffefffe; ++ *((int *)&__m256_op1[3]) = 0xfffefffe; ++ *((int *)&__m256_op1[2]) = 0xfffefffe; ++ *((int *)&__m256_op1[1]) = 0xfffefffe; ++ *((int *)&__m256_op1[0]) = 0xfffefffe; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffff0000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffff0000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00fe01f0; ++ *((int *)&__m256_op0[6]) = 0x00010000; ++ *((int *)&__m256_op0[5]) = 
0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00c40086; ++ *((int *)&__m256_op0[3]) = 0x00fe01f0; ++ *((int *)&__m256_op0[2]) = 0x00010000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00c40086; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x82a54290; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x028aa700; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x82a54290; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x02a54287; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00010000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00c40086; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00010000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00c40086; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x02a54290; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0154dc84; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x02a54290; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000089; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x02a54290; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0154dc84; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x02a54290; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000089; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x02a54290; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x0154dc84; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x02a54290; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000089; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x04000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x04000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00100000; ++ *((int *)&__m256_op0[6]) = 0x00100000; ++ *((int *)&__m256_op0[5]) = 0x00100000; ++ *((int *)&__m256_op0[4]) = 0x00100000; ++ *((int *)&__m256_op0[3]) = 0x00100000; ++ *((int 
*)&__m256_op0[2]) = 0x00100000; ++ *((int *)&__m256_op0[1]) = 0x00100000; ++ *((int *)&__m256_op0[0]) = 0x00100000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000010; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000010; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000080; ++ *((int *)&__m256_op0[6]) = 0x00000080; ++ *((int *)&__m256_op0[5]) = 0x00000080; ++ *((int *)&__m256_op0[4]) = 0x00000080; ++ *((int *)&__m256_op0[3]) = 0x00000080; ++ *((int *)&__m256_op0[2]) = 0x00000080; ++ *((int *)&__m256_op0[1]) = 0x00000080; ++ *((int *)&__m256_op0[0]) = 0x00000080; ++ *((int *)&__m256_op1[7]) = 0x00000001; ++ *((int *)&__m256_op1[6]) = 0x00000001; ++ *((int *)&__m256_op1[5]) = 0x00000001; ++ *((int *)&__m256_op1[4]) = 0x00000001; ++ *((int *)&__m256_op1[3]) = 0x00000001; ++ *((int *)&__m256_op1[2]) = 0x00000001; ++ *((int *)&__m256_op1[1]) = 0x00000001; ++ *((int *)&__m256_op1[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000001; ++ *((int *)&__m256_result[6]) = 0x00000001; ++ *((int *)&__m256_result[5]) = 0x00000001; ++ *((int *)&__m256_result[4]) = 0x00000001; ++ *((int *)&__m256_result[3]) = 0x00000001; ++ *((int *)&__m256_result[2]) = 0x00000001; ++ *((int *)&__m256_result[1]) = 0x00000001; ++ *((int *)&__m256_result[0]) = 0x00000001; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 
0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c +new file mode 100644 +index 000000000..d161c850c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c +@@ -0,0 +1,324 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0001000001000100; ++ *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256d_op2[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[0]) = 0xffffffffe651bfff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe651bfff; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256d_op2[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256d_op2[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256d_op2[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256d_result[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256d_result[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256d_result[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256d_result[0]) = 0xa020202020206431; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256d_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256d_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256d_op2[2]) = 0x7f7f7f5c8f374980; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256d_op2[0]) = 0x7f7f7f5c8f374980; ++ *((unsigned long *)&__m256d_result[3]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x8001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256d_op1[3]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256d_op1[1]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1080108010060002; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1080108010060002; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256d_op2[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7fff00017fff0000; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1716151417161514; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1716151417161514; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1716151417161514; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1716151417161514; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000002780; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ 
*((unsigned long *)&__m256d_result[0]) = 0x8000000000002780; ++ __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256d_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256d_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256d_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffba0c05; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, 
__m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000005000000020; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000005000000020; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0008000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256d_op0[1]) = 
0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffff800300000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000700000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000700000000; ++ *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c +new file mode 100644 +index 000000000..c5e9576ea +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c +@@ -0,0 +1,895 @@ ++/* { dg-do run } */ ++/* { 
dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffffff; ++ *((int *)&__m256_op1[6]) = 0xffff5f5c; ++ *((int *)&__m256_op1[5]) = 0xffffffff; ++ *((int *)&__m256_op1[4]) = 0xffff5f5c; ++ *((int *)&__m256_op1[3]) = 0xffffffff; ++ *((int *)&__m256_op1[2]) = 0xffff5f5c; ++ *((int *)&__m256_op1[1]) = 0xffffffff; ++ *((int *)&__m256_op1[0]) = 0xffff5f5c; ++ *((int *)&__m256_op2[7]) = 0x0000000f; ++ *((int *)&__m256_op2[6]) = 0x0000000f; ++ *((int *)&__m256_op2[5]) = 0xff00ff0f; ++ *((int *)&__m256_op2[4]) = 0xff005f0f; ++ *((int *)&__m256_op2[3]) = 0x0000000f; ++ *((int *)&__m256_op2[2]) = 0x0000000f; ++ *((int *)&__m256_op2[1]) = 0xff00ff0f; ++ *((int *)&__m256_op2[0]) = 0xff005f0f; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffff5f5c; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffff5f5c; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffff5f5c; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffff5f5c; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00010001; ++ *((int *)&__m256_op0[6]) = 0x00010000; ++ *((int *)&__m256_op0[5]) = 0x020afefb; ++ *((int *)&__m256_op0[4]) = 0x08140000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x0003fffc; ++ *((int *)&__m256_op0[0]) = 0x00060000; ++ *((int *)&__m256_op1[7]) = 0x80000000; ++ *((int *)&__m256_op1[6]) = 0x40000000; ++ *((int *)&__m256_op1[5]) = 0x40000000; ++ *((int *)&__m256_op1[4]) = 0x10000010; ++ *((int *)&__m256_op1[3]) = 0x80000000; ++ *((int *)&__m256_op1[2]) = 0x40000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x40000010; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x000000ff; ++ *((int *)&__m256_op2[4]) = 0x0001ffff; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x0000ffff; ++ *((int *)&__m256_op2[0]) = 0x00010000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80020000; ++ *((int *)&__m256_result[5]) = 0x828aff0b; ++ *((int *)&__m256_result[4]) = 0x8001ffff; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000002; ++ *((int *)&__m256_result[1]) = 0x8000ffff; ++ *((int *)&__m256_result[0]) = 0x800d0002; ++ __m256_out = 
__lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1f3d2101; ++ *((int *)&__m256_op0[6]) = 0x1f3d2101; ++ *((int *)&__m256_op0[5]) = 0x1f3d2101; ++ *((int *)&__m256_op0[4]) = 0xd07dbf01; ++ *((int *)&__m256_op0[3]) = 0x9f1fd080; ++ *((int *)&__m256_op0[2]) = 0x1f3d2101; ++ *((int *)&__m256_op0[1]) = 0x1f3d2101; ++ *((int *)&__m256_op0[0]) = 0xd07dbf01; ++ *((int *)&__m256_op1[7]) = 0x1d949d94; ++ *((int *)&__m256_op1[6]) = 0x9d949d95; ++ *((int *)&__m256_op1[5]) = 0x1d949d94; ++ *((int *)&__m256_op1[4]) = 0x9e1423d4; ++ *((int *)&__m256_op1[3]) = 0x1de9a03f; ++ *((int *)&__m256_op1[2]) = 0x3dd41d95; ++ *((int *)&__m256_op1[1]) = 0x1d949d94; ++ *((int *)&__m256_op1[0]) = 0x9e1423d4; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x8001b72e; ++ *((int *)&__m256_result[6]) = 0x0001b72e; ++ *((int *)&__m256_result[5]) = 0x8001b72e; ++ *((int *)&__m256_result[4]) = 0xaf12d5f0; ++ *((int *)&__m256_result[3]) = 0x00024763; ++ *((int *)&__m256_result[2]) = 0x9d9cb530; ++ *((int *)&__m256_result[1]) = 0x8001b72e; ++ *((int *)&__m256_result[0]) = 0xaf12d5f0; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1f0fdf7f; ++ *((int *)&__m256_op0[6]) = 0x3e3b31d4; ++ *((int *)&__m256_op0[5]) = 0x7ff80000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x1f0fdf7f; ++ *((int *)&__m256_op0[2]) = 0x3e3b31d4; ++ *((int *)&__m256_op0[1]) = 0x7ff80000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x7ff80000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x7ff80000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x80000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x80000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) 
= 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x0000ffff; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x0000ffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000001; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000001; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000001; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000001; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000001; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000001; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000200; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000200; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000200; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000200; ++ *((int *)&__m256_op2[7]) = 0xffffffa0; ++ *((int *)&__m256_op2[6]) = 0x00000001; ++ *((int *)&__m256_op2[5]) = 0xffffffe0; ++ *((int *)&__m256_op2[4]) = 0x00000001; ++ *((int *)&__m256_op2[3]) = 0xffffffa0; ++ *((int *)&__m256_op2[2]) = 0x00000001; ++ *((int *)&__m256_op2[1]) = 0xffffffe0; ++ *((int *)&__m256_op2[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0xffffffa0; ++ *((int *)&__m256_result[6]) = 0x80000001; ++ *((int *)&__m256_result[5]) = 0xffffffe0; ++ *((int *)&__m256_result[4]) = 0x80000001; ++ *((int *)&__m256_result[3]) = 0xffffffa0; ++ *((int *)&__m256_result[2]) = 0x80000001; ++ *((int *)&__m256_result[1]) = 0xffffffe0; ++ *((int *)&__m256_result[0]) = 0x80000001; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; 
++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x49810081; ++ *((int *)&__m256_op1[6]) = 0x4843ffe1; ++ *((int *)&__m256_op1[5]) = 0x49810081; ++ *((int *)&__m256_op1[4]) = 0x68410001; ++ *((int *)&__m256_op1[3]) = 0x49810081; ++ *((int *)&__m256_op1[2]) = 0x4843ffe1; ++ *((int *)&__m256_op1[1]) = 0x49810081; ++ *((int *)&__m256_op1[0]) = 0x68410001; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00009fff; ++ *((int *)&__m256_op0[6]) = 
0x00002001; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0x0000ffff; ++ *((int *)&__m256_op0[3]) = 0x00009fff; ++ *((int *)&__m256_op0[2]) = 0x00002001; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_op1[7]) = 0xfffeb683; ++ *((int *)&__m256_op1[6]) = 0x9ffffd80; ++ *((int *)&__m256_op1[5]) = 0xfffe97c0; ++ *((int *)&__m256_op1[4]) = 0x20010001; ++ *((int *)&__m256_op1[3]) = 0xfffeb683; ++ *((int *)&__m256_op1[2]) = 0x9ffffd80; ++ *((int *)&__m256_op1[1]) = 0xfffe97c0; ++ *((int *)&__m256_op1[0]) = 0x20010001; ++ *((int *)&__m256_op2[7]) = 0x00009fff; ++ *((int *)&__m256_op2[6]) = 0x00002001; ++ *((int *)&__m256_op2[5]) = 0x0000ffff; ++ *((int *)&__m256_op2[4]) = 0x0000ffff; ++ *((int *)&__m256_op2[3]) = 0x00009fff; ++ *((int *)&__m256_op2[2]) = 0x00002001; ++ *((int *)&__m256_op2[1]) = 0x0000ffff; ++ *((int *)&__m256_op2[0]) = 0x0000ffff; ++ *((int *)&__m256_result[7]) = 0xfffeb683; ++ *((int *)&__m256_result[6]) = 0x80002001; ++ *((int *)&__m256_result[5]) = 0xfffe97c0; ++ *((int *)&__m256_result[4]) = 0x8000ffff; ++ *((int *)&__m256_result[3]) = 0xfffeb683; ++ *((int *)&__m256_result[2]) = 0x80002001; ++ *((int *)&__m256_result[1]) = 0xfffe97c0; ++ *((int *)&__m256_result[0]) = 0x8000ffff; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fffffff; ++ *((int *)&__m256_op0[6]) = 0x80000000; ++ *((int *)&__m256_op0[5]) = 0x7fffffff; ++ *((int *)&__m256_op0[4]) = 0x80000000; ++ *((int *)&__m256_op0[3]) = 0x7fffffff; ++ *((int *)&__m256_op0[2]) = 0x80000000; ++ *((int *)&__m256_op0[1]) = 0x7fffffff; ++ *((int *)&__m256_op0[0]) = 0x80000000; ++ *((int *)&__m256_op1[7]) = 0xfd02fd02; ++ *((int *)&__m256_op1[6]) = 0xfd02fd02; ++ *((int *)&__m256_op1[5]) = 0xfd02fd02; ++ *((int *)&__m256_op1[4]) = 0xfd02fd02; ++ *((int *)&__m256_op1[3]) = 0xfd02fd02; ++ *((int *)&__m256_op1[2]) = 0xfd02fd02; ++ *((int *)&__m256_op1[1]) = 0xfd02fd02; ++ *((int *)&__m256_op1[0]) = 0xfd02fd02; ++ *((int *)&__m256_op2[7]) = 0xfd02fd02; ++ *((int *)&__m256_op2[6]) = 0xfd02fd02; ++ *((int *)&__m256_op2[5]) = 0xfd02fd02; ++ *((int *)&__m256_op2[4]) = 0xfd02fd02; ++ *((int *)&__m256_op2[3]) = 0xfd02fd02; ++ *((int *)&__m256_op2[2]) = 0xfd02fd02; ++ *((int *)&__m256_op2[1]) = 0xfd02fd02; ++ *((int *)&__m256_op2[0]) = 0xfd02fd02; ++ *((int *)&__m256_result[7]) = 0x7fffffff; ++ *((int *)&__m256_result[6]) = 0x7d02fd02; ++ *((int *)&__m256_result[5]) = 0x7fffffff; ++ *((int *)&__m256_result[4]) = 0x7d02fd02; ++ *((int *)&__m256_result[3]) = 0x7fffffff; ++ *((int *)&__m256_result[2]) = 0x7d02fd02; ++ *((int *)&__m256_result[1]) = 0x7fffffff; ++ *((int *)&__m256_result[0]) = 0x7d02fd02; ++ __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xbf7f7fff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xe651bfff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ 
*((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0xffffffff; ++ *((int *)&__m256_op2[2]) = 0xf328dfff; ++ *((int *)&__m256_op2[1]) = 0x6651bfff; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x07070707; ++ *((int *)&__m256_op0[5]) = 0x01020400; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00020100; ++ *((int *)&__m256_op0[1]) = 0x07030200; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0xffffff80; ++ *((int *)&__m256_op1[6]) = 0xfefeff00; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x01000400; ++ *((int *)&__m256_op1[3]) = 0xffffff80; ++ *((int *)&__m256_op1[2]) = 0xfeff0000; ++ *((int *)&__m256_op1[1]) = 0x02020080; ++ *((int *)&__m256_op1[0]) = 0x5c800400; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0xffffffff; ++ *((int *)&__m256_op2[2]) = 0xf328dfff; ++ *((int *)&__m256_op2[1]) = 0x6651bfff; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffff80; ++ *((int *)&__m256_result[6]) = 0x46867f79; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xf328dfff; ++ *((int *)&__m256_result[1]) = 0x6651bfff; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xe0000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xe0000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xe0000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xe0000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x80000000; ++ *((int *)&__m256_op1[4]) = 0x80000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x80000000; ++ *((int *)&__m256_op1[0]) = 0x80000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ 
*((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ 
*((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x7f800000; ++ *((int *)&__m256_op2[6]) = 0x7f800000; ++ *((int *)&__m256_op2[5]) = 0x7fc00000; ++ *((int *)&__m256_op2[4]) = 0x7fc00000; ++ *((int *)&__m256_op2[3]) = 0x7f800000; ++ *((int *)&__m256_op2[2]) = 0x7f800000; ++ *((int *)&__m256_op2[1]) = 0x7fc00000; ++ *((int *)&__m256_op2[0]) = 0x7fc00000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7fc00000; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7fc00000; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x7fefffff; ++ *((int *)&__m256_op1[6]) = 0xffffffff; ++ *((int *)&__m256_op1[5]) = 0x7fefffff; ++ *((int *)&__m256_op1[4]) = 0xffffffff; ++ *((int *)&__m256_op1[3]) = 0x7fefffff; ++ *((int *)&__m256_op1[2]) = 0xffffffff; ++ *((int *)&__m256_op1[1]) = 0x7fefffff; ++ *((int *)&__m256_op1[0]) = 0xffffffff; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int 
*)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7fefffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x7fefffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x7fefffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x7fefffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xf7f8f7f8; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00003f78; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xf7f8f7f8; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00003f78; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0xf7f8f7f8; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00003f78; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0xf7f8f7f8; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00003f78; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0xff800000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0xff800000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0xffffffff; ++ *((int *)&__m256_op2[4]) = 0xffffffff; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0xffffffff; ++ *((int *)&__m256_op2[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; 
++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x01010100; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000405; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x01010100; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000405; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x01010100; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000405; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x01010100; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000405; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x01010100; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0x00000405; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x01010100; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0x00000405; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00800080; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000202; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00800080; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000202; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0xff88ff88; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0xff88ff88; ++ *((int *)&__m256_op2[7]) = 0x00000000; ++ *((int *)&__m256_op2[6]) = 0x00000000; ++ *((int *)&__m256_op2[5]) = 0x00000000; ++ *((int *)&__m256_op2[4]) = 0x00000000; ++ *((int *)&__m256_op2[3]) = 0x00000000; ++ *((int *)&__m256_op2[2]) = 0x00000000; ++ *((int *)&__m256_op2[1]) = 0x00000000; ++ *((int *)&__m256_op2[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x80000000; ++ *((int *)&__m256_result[4]) = 0xffc8ff88; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x80000000; ++ *((int *)&__m256_result[0]) = 0xffc8ff88; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int 
*)&__m256_op1[5]) = 0x00000000; ++ *((int *)&__m256_op1[4]) = 0x00000000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x00000000; ++ *((int *)&__m256_op1[0]) = 0x00000000; ++ *((int *)&__m256_op2[7]) = 0x001fffff; ++ *((int *)&__m256_op2[6]) = 0xffffffff; ++ *((int *)&__m256_op2[5]) = 0xffffffff; ++ *((int *)&__m256_op2[4]) = 0xffffffff; ++ *((int *)&__m256_op2[3]) = 0x001fffff; ++ *((int *)&__m256_op2[2]) = 0xffffffff; ++ *((int *)&__m256_op2[1]) = 0xffffffff; ++ *((int *)&__m256_op2[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x001fffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x001fffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_op1[7]) = 0x00000000; ++ *((int *)&__m256_op1[6]) = 0x00000000; ++ *((int *)&__m256_op1[5]) = 0x7fff8000; ++ *((int *)&__m256_op1[4]) = 0x7fff0000; ++ *((int *)&__m256_op1[3]) = 0x00000000; ++ *((int *)&__m256_op1[2]) = 0x00000000; ++ *((int *)&__m256_op1[1]) = 0x7fff8000; ++ *((int *)&__m256_op1[0]) = 0x7fff0000; ++ *((int *)&__m256_op2[7]) = 0xffffffff; ++ *((int *)&__m256_op2[6]) = 0xffffffff; ++ *((int *)&__m256_op2[5]) = 0xffffffff; ++ *((int *)&__m256_op2[4]) = 0xffffff10; ++ *((int *)&__m256_op2[3]) = 0xffffffff; ++ *((int *)&__m256_op2[2]) = 0xffffffff; ++ *((int *)&__m256_op2[1]) = 0xffffffff; ++ *((int *)&__m256_op2[0]) = 0xffffff10; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffff10; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffff10; ++ __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c +new file mode 100644 +index 000000000..4babf1638 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c +@@ -0,0 +1,429 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x0); ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000008050501; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000008050501; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffffffffffffff8; ++ *((unsigned long 
*)&__m256d_result[0]) = 0xfffffffffffffff8; ++ __m256d_out = __lasx_xvfrint_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7c00000880008000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_result[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256d_result[0]) = 0x6040190d00000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op0[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256d_op0[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[2]) = 0x4084800000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x4084800000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_result[3]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_result[2]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_result[1]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m256d_result[0]) = 0xffff0001ffff0001; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x3fffbfff80000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00004000007f8000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3fffbfff80000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00004000007f8000; ++ *((unsigned long *)&__m256d_result[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrne_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xfffffefe00000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0001009a000100fd; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256d_result[3]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256d_op0[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256d_op0[0]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256d_result[3]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xfc00000000000048; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_result[3]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_result[2]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_result[1]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256d_result[0]) = 0xfffffff0fffffff0; ++ __m256d_out = __lasx_xvfrintrp_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1e18000000000000; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x017e017e01dd61de; ++ *((unsigned long *)&__m256d_op0[2]) = 0x5d637d043bc4fc43; ++ *((unsigned long *)&__m256d_op0[1]) = 0x01dcc2dce31bc35d; ++ *((unsigned long *)&__m256d_op0[0]) = 0x5e041d245b85fc43; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x5d637d043bc4fc43; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x5e041d245b85fc43; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_result[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_result[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_result[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256d_result[0]) = 0x7c007c007c007c00; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256d_result, __m256d_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5); ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrm_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000800000098; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000040000ffca; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000800000098; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000040000ff79; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfrintrz_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c +new file mode 100644 +index 000000000..9f2fa6747 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c +@@ -0,0 +1,723 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffff5f5c; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffff605a; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffff5f5c; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffff605a; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffff5f5c; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffff605a; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffff5f5c; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffff605a; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xc5c5c5c4; ++ *((int *)&__m256_op0[6]) = 0xc5c5c5c4; ++ *((int *)&__m256_op0[5]) = 0x45c5c5c5; ++ *((int *)&__m256_op0[4]) = 0x45c5c5c5; ++ *((int *)&__m256_op0[3]) = 0xc5c5c5c4; ++ *((int *)&__m256_op0[2]) = 0xc5c5c5c4; ++ *((int *)&__m256_op0[1]) = 0x45c5c5c5; ++ *((int *)&__m256_op0[0]) = 0x45c5c5c5; ++ *((int 
*)&__m256_result[7]) = 0xc5c5c800; ++ *((int *)&__m256_result[6]) = 0xc5c5c800; ++ *((int *)&__m256_result[5]) = 0x45c5c800; ++ *((int *)&__m256_result[4]) = 0x45c5c800; ++ *((int *)&__m256_result[3]) = 0xc5c5c800; ++ *((int *)&__m256_result[2]) = 0xc5c5c800; ++ *((int *)&__m256_result[1]) = 0x45c5c800; ++ *((int *)&__m256_result[0]) = 0x45c5c800; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffff6f20; ++ *((int *)&__m256_op0[5]) = 0x0000781e; ++ *((int *)&__m256_op0[4]) = 0x0000f221; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffff6f20; ++ *((int *)&__m256_op0[1]) = 0x0000781e; ++ *((int *)&__m256_op0[0]) = 0x0000f221; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xffff6f20; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xffff6f20; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffb3b4; ++ *((int *)&__m256_op0[5]) = 0xfffffff5; ++ *((int *)&__m256_op0[4]) = 0xffff4738; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffb3b4; ++ *((int *)&__m256_op0[1]) = 0xfffffff5; ++ *((int *)&__m256_op0[0]) = 0xffff4738; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xffffb3b4; ++ *((int *)&__m256_result[5]) = 0xfffffff5; ++ *((int *)&__m256_result[4]) = 0xffff4738; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xffffb3b4; ++ *((int *)&__m256_result[1]) = 0xfffffff5; ++ *((int *)&__m256_result[0]) = 0xffff4738; ++ 
__m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00ff0000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00ff0000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00ff0000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00ff0000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00003fea; ++ *((int *)&__m256_op0[6]) = 0x00013feb; ++ *((int *)&__m256_op0[5]) = 0x00003fe9; ++ *((int *)&__m256_op0[4]) = 0x00014022; ++ *((int *)&__m256_op0[3]) = 0x00003fea; ++ *((int *)&__m256_op0[2]) = 0x00013feb; ++ *((int *)&__m256_op0[1]) = 0x00003fe9; ++ *((int *)&__m256_op0[0]) = 0x00014022; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrint_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 
0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x01010101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int 
*)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x01010101; ++ *((int *)&__m256_op0[6]) = 0x01010101; ++ *((int *)&__m256_op0[5]) = 0x01010101; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x01010101; ++ *((int *)&__m256_op0[2]) = 0x01010101; ++ *((int *)&__m256_op0[1]) = 0x01010101; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrne_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x55555555; ++ *((int *)&__m256_op0[6]) = 0x36aaaaac; ++ *((int *)&__m256_op0[5]) = 0x55555555; ++ *((int *)&__m256_op0[4]) = 0xaaaaaaac; ++ *((int *)&__m256_op0[3]) = 0x55555555; ++ *((int *)&__m256_op0[2]) = 0x36aaaaac; ++ *((int *)&__m256_op0[1]) = 0x55555555; ++ *((int *)&__m256_op0[0]) = 0xaaaaaaac; ++ *((int *)&__m256_result[7]) = 0x55555555; ++ *((int *)&__m256_result[6]) = 0x3f800000; ++ *((int *)&__m256_result[5]) = 0x55555555; ++ *((int *)&__m256_result[4]) = 0x80000000; ++ *((int *)&__m256_result[3]) = 0x55555555; ++ *((int *)&__m256_result[2]) = 0x3f800000; ++ *((int *)&__m256_result[1]) = 0x55555555; ++ *((int *)&__m256_result[0]) = 0x80000000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffc741; ++ *((int *)&__m256_op0[6]) = 0x8a023680; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffff8845; ++ *((int *)&__m256_op0[2]) = 0xbb954b00; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffc741; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0xffff8845; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 
0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00200101; ++ *((int *)&__m256_op0[6]) = 0x01610000; ++ *((int *)&__m256_op0[5]) = 0x00612000; ++ *((int *)&__m256_op0[4]) = 0x00610000; ++ *((int *)&__m256_op0[3]) = 0x00200101; ++ *((int *)&__m256_op0[2]) = 0x01610000; ++ *((int *)&__m256_op0[1]) = 0x00612000; ++ *((int *)&__m256_op0[0]) = 0x00610000; ++ *((int *)&__m256_result[7]) = 0x3f800000; ++ *((int *)&__m256_result[6]) = 0x3f800000; ++ *((int *)&__m256_result[5]) = 0x3f800000; ++ *((int *)&__m256_result[4]) = 0x3f800000; ++ *((int *)&__m256_result[3]) = 0x3f800000; ++ *((int *)&__m256_result[2]) = 0x3f800000; ++ *((int *)&__m256_result[1]) = 0x3f800000; ++ *((int *)&__m256_result[0]) = 0x3f800000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfefefefe; ++ *((int *)&__m256_op0[4]) = 0x01010101; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfefefefe; ++ *((int *)&__m256_op0[0]) = 0x01010101; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0xfefefefe; ++ *((int *)&__m256_result[4]) = 0x3f800000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0xfefefefe; ++ *((int *)&__m256_result[0]) = 0x3f800000; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1c1c1c1c; ++ *((int *)&__m256_op0[6]) = 0x1c1c1c1c; ++ *((int *)&__m256_op0[5]) = 0xfffffffe; ++ *((int *)&__m256_op0[4]) = 0xffffff00; ++ *((int *)&__m256_op0[3]) = 0x1c1c1c1c; ++ *((int *)&__m256_op0[2]) = 0x1c1c1c1c; ++ *((int *)&__m256_op0[1]) = 0xfffffffe; ++ *((int *)&__m256_op0[0]) = 0xffffff00; ++ *((int *)&__m256_result[7]) = 0x3f800000; ++ *((int *)&__m256_result[6]) = 0x3f800000; ++ *((int *)&__m256_result[5]) = 0xfffffffe; ++ *((int *)&__m256_result[4]) = 0xffffff00; ++ *((int *)&__m256_result[3]) = 0x3f800000; ++ *((int *)&__m256_result[2]) = 0x3f800000; ++ *((int *)&__m256_result[1]) = 0xfffffffe; ++ *((int *)&__m256_result[0]) = 0xffffff00; ++ __m256_out = __lasx_xvfrintrp_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000008; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00080000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 
0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x5d20a0a1; ++ *((int *)&__m256_op0[6]) = 0x5d20a0a1; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x5d20a0a1; ++ *((int *)&__m256_op0[2]) = 0x5d20a0a1; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x5d20a0a1; ++ *((int *)&__m256_result[6]) = 0x5d20a0a1; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x5d20a0a1; ++ *((int *)&__m256_result[2]) = 0x5d20a0a1; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x001d001d; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x001d001d; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ 
*((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000033; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000033; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrm_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000300; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000303; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xfffffffe; ++ *((int *)&__m256_op0[5]) = 0xfffffffe; ++ *((int *)&__m256_op0[4]) = 0xfffffefc; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xfffffffe; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xfffffffe; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xfffffffe; ++ *((int *)&__m256_result[5]) = 0xfffffffe; ++ *((int *)&__m256_result[4]) = 0xfffffefc; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xfffffffe; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xfffffffe; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int 
*)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x0001c4e8; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x0001c4e8; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x80000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x80000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0x80000000; ++ *((int *)&__m256_result[6]) = 0x80000000; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0x80000000; ++ *((int *)&__m256_result[2]) = 0x80000000; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xf5fffc00; ++ *((int *)&__m256_op0[6]) = 0xfc000000; ++ *((int *)&__m256_op0[5]) = 0xf5fffc00; ++ *((int *)&__m256_op0[4]) = 0xfc000000; ++ *((int *)&__m256_op0[3]) = 0xf5fffc00; ++ *((int *)&__m256_op0[2]) = 0xfc000000; ++ *((int *)&__m256_op0[1]) = 0xf5fffc00; ++ *((int *)&__m256_op0[0]) = 0xfc000000; ++ *((int *)&__m256_result[7]) = 0xf5fffc00; ++ *((int *)&__m256_result[6]) = 0xfc000000; ++ *((int *)&__m256_result[5]) = 0xf5fffc00; ++ *((int *)&__m256_result[4]) = 0xfc000000; ++ *((int *)&__m256_result[3]) = 0xf5fffc00; ++ *((int *)&__m256_result[2]) = 0xfc000000; ++ *((int *)&__m256_result[1]) = 0xf5fffc00; ++ *((int *)&__m256_result[0]) = 
0xfc000000; ++ __m256_out = __lasx_xvfrintrz_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c +new file mode 100644 +index 000000000..557f9f8b5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000080000; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000007f00340040; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000007f000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020200008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000008; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x03f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0x03f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x03f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op2[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op2[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op2[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op2[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x007f007bfffffffb; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x007f007bfffffffb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; 
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000c040c0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000c040c0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe000ffffffff08; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10; ++ __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c +new file mode 100644 +index 000000000..cdb7b11aa +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x38a966b31be83ee9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5f6108dc25b8e028; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long *)&__m256i_op0[0]) = 0x683b8b67e20c8ee5; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffcd42ffffecc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000475ffff4c51; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000740dffffad17; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f4bffff7130; ++ *((unsigned long *)&__m256i_result[3]) = 0x38a966b31be83ee9; ++ *((unsigned long *)&__m256i_result[2]) = 0x5f6108dc25b80001; ++ *((unsigned long *)&__m256i_result[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long *)&__m256i_result[0]) = 0x683b8b67e20c0001; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0008; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0008; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0008ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0008ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff9e9eb09e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff9e9eb09e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffc00000ffc0ffc0; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000165e0000480d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000165e0000480d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_result[2]) = 0x000016000000480d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_result[0]) = 0x000016000000480d; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe800c0d8fffeeece; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe800c0d8fffeeece; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_result[1]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000200000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffffffff; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c0080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c0080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7c00000880008000; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008b03e457db03e; ++ *((unsigned long *)&__m256i_result[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008b03e457db03e; ++ *((unsigned long *)&__m256i_result[0]) = 
0x457db03e45a87310; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000008000b; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000008000b; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000b; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvfrstpi_b 
(__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000000000000; ++ __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01fffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01fffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x10ffffff10000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x10ffffff10000006; ++ __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c +new file mode 100644 +index 000000000..18d5c51de +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c +@@ -0,0 +1,482 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x2f03988e2052463e; ++ *((unsigned long *)&__m256d_result[2]) = 0x2f03988e1409212e; ++ *((unsigned long *)&__m256d_result[1]) = 0x2f03988e2052463e; ++ *((unsigned long *)&__m256d_result[0]) = 0x2f03988e1409212e; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256d_result[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000100000018; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000100000018; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x1f60000000c00000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0003030300000300; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0003030300000100; ++ *((unsigned long *)&__m256d_result[3]) = 0x1febc46085090ea0; ++ *((unsigned long *)&__m256d_result[2]) = 0x1febc46085090ea0; ++ *((unsigned long *)&__m256d_result[1]) = 0x1febc46085090567; ++ *((unsigned long *)&__m256d_result[0]) = 0x1febc46085090567; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x1f9689fdb16cabbd; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x1f9689fdb16cabbd; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff0000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, 
__m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000010000000100; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x1fa0000000080000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff8000; ++ __m256d_out = __lasx_xvfsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffff00000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256d_result[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256d_result[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x5ff00007fff9fff3; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x353bb67af686ad9b; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 
0x353bb67af686ad9b; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001f0000ffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x60000007fffe0001; ++ *((unsigned long *)&__m256d_result[2]) = 0x60000007fffe0001; ++ *((unsigned long *)&__m256d_result[1]) = 0x6056fd4e7926d5c0; ++ *((unsigned long *)&__m256d_result[0]) = 0x6056fd4e1a4616c4; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00001bfa000000f9; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000f900004040; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00001bfa000000f9; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000f900004040; ++ *((unsigned long *)&__m256d_result[3]) = 0x60183329ceb52cf0; ++ *((unsigned long *)&__m256d_result[2]) = 0x6040392cdaf9b3ff; ++ 
*((unsigned long *)&__m256d_result[1]) = 0x60183329ceb52cf0; ++ *((unsigned long *)&__m256d_result[0]) = 0x6040392cdaf9b3ff; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x3de00103153ff5fb; ++ *((unsigned long *)&__m256d_op0[2]) = 0xbffffffe80000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3de00103153ff5fb; ++ *((unsigned long *)&__m256d_op0[0]) = 0xbffffffe80000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256d_result[3]) = 0x606a20bd700e59a3; ++ *((unsigned long *)&__m256d_result[2]) = 0x6066a09e66c5f1bb; ++ *((unsigned long *)&__m256d_result[1]) = 0x606a20bd700e59a3; ++ *((unsigned long *)&__m256d_result[0]) = 0x6066a09e66c5f1bb; ++ __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256d_op0[2]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_result[3]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256d_result[2]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256d_result[1]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xaf0489001bd4c0c3; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xaf0489001bd4c0c3; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000fffff614; ++ *((unsigned long *)&__m256d_op0[1]) = 
0x0000000a00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000fffff614; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff80000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x8060000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x8060000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff; ++ __m256d_out = __lasx_xvfrecip_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c +new file mode 100644 +index 000000000..27df4a27d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c +@@ -0,0 +1,457 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, 
__m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x00000000; ++ *((int *)&__m256_result[6]) = 0x00000000; ++ *((int *)&__m256_result[5]) = 0x00000000; ++ *((int *)&__m256_result[4]) = 0x00000000; ++ *((int *)&__m256_result[3]) = 0x00000000; ++ *((int *)&__m256_result[2]) = 0x00000000; ++ *((int *)&__m256_result[1]) = 0x00000000; ++ *((int *)&__m256_result[0]) = 0x00000000; ++ __m256_out = __lasx_xvfsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000ff80; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ffff; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x60b53246; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x60b5054d; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x0060005a; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x0060005a; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0x5f13ccf5; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0x5f13ccf5; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; 
++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 
0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000002; ++ *((int *)&__m256_op0[4]) = 0x00000008; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000002; ++ *((int *)&__m256_op0[0]) = 0x00000008; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x64800000; ++ *((int *)&__m256_result[4]) = 0x64000000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x64800000; ++ *((int *)&__m256_result[0]) = 0x64000000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x000000bd; ++ *((int *)&__m256_op0[4]) = 0xfef907bc; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x000000bd; ++ *((int *)&__m256_op0[0]) = 0xfef907bc; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x62d2acee; ++ *((int *)&__m256_result[4]) = 0x7fc00000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x62d2acee; ++ *((int *)&__m256_result[0]) = 0x7fc00000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x04e8296f; ++ *((int *)&__m256_op0[6]) = 0x18181818; ++ *((int *)&__m256_op0[5]) = 0x132feea9; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x04e8296f; ++ *((int *)&__m256_op0[2]) = 0x18181818; ++ *((int *)&__m256_op0[1]) = 0x132feea9; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x5cbe15f2; ++ *((int *)&__m256_result[6]) = 0x53261036; ++ *((int *)&__m256_result[5]) = 0x559a674d; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x5cbe15f2; ++ *((int *)&__m256_result[2]) = 0x53261036; ++ *((int *)&__m256_result[1]) = 0x559a674d; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrsqrt_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x000000ff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000ff00; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((int *)&__m256_result[7]) = 0xffffffff; ++ *((int *)&__m256_result[6]) = 0xffffffff; ++ *((int *)&__m256_result[5]) = 0xffffffff; ++ *((int *)&__m256_result[4]) = 0xffffffff; ++ *((int *)&__m256_result[3]) = 0xffffffff; ++ *((int *)&__m256_result[2]) = 0xffffffff; ++ *((int *)&__m256_result[1]) = 0xffffffff; ++ *((int *)&__m256_result[0]) = 0xffffffff; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int 
*)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfc003802; ++ *((int *)&__m256_op0[6]) = 0xfc000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xfc00fc00; ++ *((int *)&__m256_op0[3]) = 0xfc003802; ++ *((int *)&__m256_op0[2]) = 0xfc000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xfc00fc00; ++ *((int *)&__m256_result[7]) = 0x82ff902d; ++ *((int *)&__m256_result[6]) = 0x83000000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x82fe0bd9; ++ *((int *)&__m256_result[3]) = 0x82ff902d; ++ *((int *)&__m256_result[2]) = 0x83000000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x82fe0bd9; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((int *)&__m256_result[7]) = 0x7f800000; ++ *((int *)&__m256_result[6]) = 0x7f800000; ++ *((int *)&__m256_result[5]) = 0x7f800000; ++ *((int *)&__m256_result[4]) = 0x7f800000; ++ *((int *)&__m256_result[3]) = 0x7f800000; ++ *((int *)&__m256_result[2]) = 0x7f800000; ++ *((int *)&__m256_result[1]) = 0x7f800000; ++ *((int *)&__m256_result[0]) = 0x7f800000; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfd02fd02; ++ *((int *)&__m256_op0[6]) = 0xfd02fd02; ++ *((int *)&__m256_op0[5]) = 0xfd02fd02; ++ *((int *)&__m256_op0[4]) = 0xfd02fd02; ++ *((int *)&__m256_op0[3]) = 0xfd02fd02; ++ *((int *)&__m256_op0[2]) = 0xfd02fd02; ++ *((int *)&__m256_op0[1]) = 0xfd02fd02; ++ *((int *)&__m256_op0[0]) = 0xfd02fd02; ++ *((int *)&__m256_result[7]) = 0x81fa28e4; ++ *((int *)&__m256_result[6]) = 0x81fa28e4; ++ *((int *)&__m256_result[5]) = 0x81fa28e4; ++ *((int *)&__m256_result[4]) = 0x81fa28e4; ++ *((int *)&__m256_result[3]) = 0x81fa28e4; ++ *((int *)&__m256_result[2]) = 0x81fa28e4; ++ *((int *)&__m256_result[1]) = 0x81fa28e4; ++ *((int *)&__m256_result[0]) = 0x81fa28e4; ++ __m256_out = __lasx_xvfrecip_s (__m256_op0); ++ ASSERTEQ_32 (__LINE__, __m256_result, __m256_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c +new file mode 100644 +index 000000000..c75468d42 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c +@@ -0,0 +1,471 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffcb423a587053; ++ *((unsigned long *)&__m256d_op0[2]) = 0x6d46f43e71141b81; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffcb423a584528; ++ *((unsigned long *)&__m256d_op0[0]) = 0x9bdf36c8d78158a1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000080; ++ *((unsigned long 
*)&__m256d_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x43f0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffc03b1fc5e050; ++ *((unsigned long *)&__m256d_op0[2]) = 0x6a9e3fa2603a2000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffc03b1fc5e050; ++ *((unsigned long *)&__m256d_op0[0]) = 0x6a9e3fa2603a2000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000001c9880; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000001c9880; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long *)&__m256d_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long *)&__m256d_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long *)&__m256d_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffff9e9eb09e; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00ffffff1e9e9e9e; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffff9e9eb09e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000001e0007ffff; ++ 
*((unsigned long *)&__m256d_op0[2]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c +new file mode 100644 +index 000000000..ad72f7596 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c +@@ -0,0 +1,1565 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x0000ffff; ++ *((int *)&__m256_op0[6]) = 0xc0008001; ++ *((int *)&__m256_op0[5]) = 0x0000ffff; ++ *((int *)&__m256_op0[4]) = 0xc0008001; ++ *((int *)&__m256_op0[3]) = 0x0000ffff; ++ *((int *)&__m256_op0[2]) = 0xc0008001; ++ *((int *)&__m256_op0[1]) = 0x0000ffff; ++ *((int *)&__m256_op0[0]) = 0xc0008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffe; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x3f3f3f3c; ++ *((int *)&__m256_op0[5]) = 0xc6c6c6c6; ++ *((int *)&__m256_op0[4]) = 0x8787878a; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x3f3f3f3c; ++ *((int *)&__m256_op0[1]) = 0x8787878a; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff9c9d00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x1f0fdf7f; ++ *((int *)&__m256_op0[6]) = 0x3e3b31d4; ++ *((int *)&__m256_op0[5]) = 0x7ff80000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x1f0fdf7f; ++ *((int *)&__m256_op0[2]) = 0x3e3b31d4; ++ *((int *)&__m256_op0[1]) = 0x7ff80000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000200000003; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0080000200000003; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x55555555; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x00000004; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x55555555; ++ *((int *)&__m256_op0[1]) = 0x00000001; ++ *((int *)&__m256_op0[0]) = 0x00000004; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00ff00ffff0000ff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00ff00ffff0000ff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fe363637fe36364; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fe363637fe36364; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000020000000b; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000020000000a; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000000000000000a; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x40000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x40000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256d_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256d_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256d_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000004040104; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffd1108199; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000714910f9; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000030000000c; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000001100000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000500000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffff7e; ++ *((int *)&__m256_op0[4]) = 0xffffff46; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffff7e; ++ *((int *)&__m256_op0[0]) = 0xffffff46; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 
0x0000001000000010; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m256d_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ 
*((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0fffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x0fffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x0fffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x0fffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; 
++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfd12fd12; ++ *((int *)&__m256_op0[6]) = 0xfd12fd12; ++ *((int *)&__m256_op0[5]) = 0xfd12fd12; ++ *((int *)&__m256_op0[4]) = 0xfd12fd12; ++ *((int *)&__m256_op0[3]) = 0xfd12fd12; ++ *((int *)&__m256_op0[2]) = 0xfd12fd12; ++ *((int *)&__m256_op0[1]) = 0xfd12fd12; ++ *((int *)&__m256_op0[0]) = 0xfd12fd12; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvftintrne_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256d_op1[2]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256d_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256d_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffe4ffe6; ++ *((int *)&__m256_op0[6]) = 0xffe5ffe6; ++ *((int *)&__m256_op0[5]) = 0xffe4ffe6; ++ *((int *)&__m256_op0[4]) = 0xffe5ffe6; ++ *((int *)&__m256_op0[3]) = 0xffe4ffe6; ++ *((int *)&__m256_op0[2]) = 0xffe5ffe6; ++ *((int *)&__m256_op0[1]) = 0xffe4ffe6; ++ *((int *)&__m256_op0[0]) = 0xffe5ffe6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x00010102; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80008000; ++ *((int *)&__m256_op0[6]) = 0x80008000; ++ *((int *)&__m256_op0[5]) = 0x80008000; ++ *((int *)&__m256_op0[4]) = 0x80008000; ++ *((int *)&__m256_op0[3]) = 0x80008000; ++ *((int *)&__m256_op0[2]) = 0x80008000; ++ *((int *)&__m256_op0[1]) = 0x80008000; ++ *((int *)&__m256_op0[0]) = 0x80008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int 
*)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x10000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x10000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 
0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x00ff00ff; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0010001000100010; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0010001000107878; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0010001000107878; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0040000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0040000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0040000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00003fea00013fec; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00003fe50001c013; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00003fea00013fec; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00003fe50001c013; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000180000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000180000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffff000000010000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000095120000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc9da000063f50000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xfffe000000000000; ++ 
*((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x4001000100020000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffefffe; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xfffefffe; ++ *((int *)&__m256_op0[2]) = 0xfffefffd; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x0707feb6; ++ *((int *)&__m256_op0[6]) = 0x0707b7d0; ++ *((int *)&__m256_op0[5]) = 0x45baa7ef; ++ *((int *)&__m256_op0[4]) = 0x6a95a985; ++ *((int *)&__m256_op0[3]) = 0x0707feb6; ++ *((int *)&__m256_op0[2]) = 0x0707b7d0; ++ *((int *)&__m256_op0[1]) = 0x45baa7ef; ++ *((int *)&__m256_op0[0]) = 0x6a95a985; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000017547fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000017547fffffff; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[6]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[5]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[4]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[3]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[2]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[1]) = 0x6d6d6d6d; ++ *((int *)&__m256_op0[0]) = 0x6d6d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256d_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256d_op1[2]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256d_op1[0]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256d_op1[2]) = 0x8000000100000001; ++ *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256d_op1[0]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfff10000; ++ *((int *)&__m256_op0[4]) = 0xfff10000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfff10000; ++ *((int *)&__m256_op0[0]) = 0xfff10000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xfdfcfda8; ++ *((int *)&__m256_op0[5]) = 0x0000e282; ++ *((int *)&__m256_op0[4]) = 0x1d20ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xfdfcfda8; ++ *((int *)&__m256_op0[1]) = 0x0000e282; ++ *((int *)&__m256_op0[0]) = 0x1d20ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0080000000800000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0080000000800000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256d_op0[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256d_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256d_op0[0]) = 
0xff00ff007f007f00; ++ *((unsigned long *)&__m256d_op1[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256d_op1[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256d_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256d_op1[0]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256d_op1[3]) = 0x003f020001400200; ++ *((unsigned long *)&__m256d_op1[2]) = 0x003f00ff003f00c4; ++ *((unsigned long *)&__m256d_op1[1]) = 0x003f020001400200; ++ *((unsigned long *)&__m256d_op1[0]) = 0x003f00ff003f00c4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s 
(__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[2]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256d_op1[0]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ 
*((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x002e2100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x34000000fff00000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x3380000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x000000030000000c; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000001100000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000500000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long *)&__m256d_op1[2]) = 0xa5a5a5a5a5a5a5ff; ++ *((unsigned long *)&__m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5; ++ *((unsigned long *)&__m256d_op1[0]) = 0xa5a5a5a5a5a5a5ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x55555555; ++ *((int *)&__m256_op0[6]) = 0x55555555; ++ *((int *)&__m256_op0[5]) = 0x5d5d5d5d; ++ *((int *)&__m256_op0[4]) = 0x5d555d55; ++ *((int *)&__m256_op0[3]) = 0x55555555; ++ *((int *)&__m256_op0[2]) = 0x55555555; ++ *((int *)&__m256_op0[1]) = 0x5d5ca2a3; ++ *((int *)&__m256_op0[0]) = 0x5d54aaab; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0b085bfc00000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0b004bc000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0b085bfc00000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0b004bc000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0xffeeffaf; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000011; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0xffeeffaf; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x00ff00ff; ++ *((int *)&__m256_op0[3]) = 0x00ff00ff; ++ *((int *)&__m256_op0[2]) = 0x00ff00ff; ++ *((int *)&__m256_op0[1]) 
= 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x001d001d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256d_op0[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256d_op0[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256d_op0[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x81fa28e4; ++ *((int *)&__m256_op0[6]) = 0x81fa28e4; ++ *((int *)&__m256_op0[5]) = 0x81fa28e4; ++ *((int *)&__m256_op0[4]) = 0x81fa28e4; ++ *((int *)&__m256_op0[3]) = 0x81fa28e4; ++ *((int *)&__m256_op0[2]) = 0x81fa28e4; ++ *((int *)&__m256_op0[1]) = 0x81fa28e4; ++ *((int *)&__m256_op0[0]) = 0x81fa28e4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_w_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c +new file mode 100644 +index 000000000..19db4e192 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c +@@ -0,0 +1,511 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffefffe; ++ *((int *)&__m256_op0[6]) = 0xfffefffe; ++ 
*((int *)&__m256_op0[5]) = 0xfffefffe; ++ *((int *)&__m256_op0[4]) = 0xfffefffe; ++ *((int *)&__m256_op0[3]) = 0xfffefffe; ++ *((int *)&__m256_op0[2]) = 0xfffefffe; ++ *((int *)&__m256_op0[1]) = 0xfffefffe; ++ *((int *)&__m256_op0[0]) = 0xfffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000200; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000200; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000200; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000200; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffff1; ++ *((int *)&__m256_op0[6]) = 0xfffffff1; ++ *((int *)&__m256_op0[5]) = 0xfffffff1; ++ *((int *)&__m256_op0[4]) = 0xfffffff1; ++ *((int *)&__m256_op0[3]) = 0xfffffff1; ++ *((int *)&__m256_op0[2]) = 0xfffffff1; ++ *((int *)&__m256_op0[1]) = 0xfffffff1; ++ *((int *)&__m256_op0[0]) = 0xfffffff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x43ef8787; ++ *((int *)&__m256_op0[4]) = 0x8000ffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x43ef8787; ++ *((int *)&__m256_op0[0]) = 0x8000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000001df00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000001df00000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ 
*((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0x00030005; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0x00030005; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7ff80000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x7ff80000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x7ff80000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x7ff80000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000002; ++ *((int *)&__m256_op0[6]) = 0x00000002; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ 
*((int *)&__m256_op0[3]) = 0x00000002; ++ *((int *)&__m256_op0[2]) = 0x00000002; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7ff00000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x7ff00000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x7ff00000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x7ff00000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00016e00; ++ *((int *)&__m256_op0[3]) = 
0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00016e00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftint_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x38a966b301f41ffd; ++ *((unsigned long *)&__m256d_op0[2]) = 
0x5f6108ee13ff0000; ++ *((unsigned long *)&__m256d_op0[1]) = 0xf41a56e8d10201f6; ++ *((unsigned long *)&__m256d_op0[0]) = 0x683b8b34f1020001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256d_op0[2]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256d_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256d_op0[0]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x00000000007a00f8; ++ *((unsigned long *)&__m256d_op0[2]) = 0x00ff00ff01640092; ++ *((unsigned long *)&__m256d_op0[1]) = 0x00000000007a00f8; ++ *((unsigned long *)&__m256d_op0[0]) = 0x00ff00ff01640092; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000000007fff80fe; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000007fff80fe; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff80007ffe; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000ff007fff80fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op0[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256d_op0[3]) = 0x0000000008e8c000; ++ *((unsigned long *)&__m256d_op0[2]) = 0x000000000fffc000; ++ *((unsigned long *)&__m256d_op0[1]) = 0x0000000008e8c000; ++ *((unsigned long *)&__m256d_op0[0]) = 0x000000000fffc000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c +new file mode 100644 +index 000000000..b0fdf7e0b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c +@@ -0,0 +1,1580 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m256_op0[7]) = 0xc58a0a0a; ++ *((int 
*)&__m256_op0[6]) = 0x07070706; ++ *((int *)&__m256_op0[5]) = 0x006b60e4; ++ *((int *)&__m256_op0[4]) = 0x180b0023; ++ *((int *)&__m256_op0[3]) = 0x1b39153f; ++ *((int *)&__m256_op0[2]) = 0x334b966a; ++ *((int *)&__m256_op0[1]) = 0xf1d75d79; ++ *((int *)&__m256_op0[0]) = 0xefcac002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x40404040; ++ *((int *)&__m256_op0[6]) = 0x40404040; ++ *((int *)&__m256_op0[5]) = 0x40404040; ++ *((int *)&__m256_op0[4]) = 0x40404040; ++ *((int *)&__m256_op0[3]) = 0x40404040; ++ *((int *)&__m256_op0[2]) = 0x40404040; ++ *((int *)&__m256_op0[1]) = 0x40404040; ++ *((int *)&__m256_op0[0]) = 0x40404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int 
*)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00080000; ++ *((int *)&__m256_op0[4]) = 0x00000010; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00080000; ++ *((int *)&__m256_op0[0]) = 0x00000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x40f69fe6; ++ *((int *)&__m256_op0[6]) = 0x3c26f4f5; ++ *((int *)&__m256_op0[5]) = 0x7ff7ffff; ++ *((int *)&__m256_op0[4]) = 0x00000007; ++ *((int *)&__m256_op0[3]) = 0x40f69fe6; ++ *((int *)&__m256_op0[2]) = 0x3c26f4f5; ++ *((int *)&__m256_op0[1]) = 0x7ff7ffff; ++ *((int *)&__m256_op0[0]) = 0x00000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00060000; ++ *((int *)&__m256_op0[6]) = 0x00040000; ++ *((int *)&__m256_op0[5]) = 0x00020000; ++ *((int 
*)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00060000; ++ *((int *)&__m256_op0[2]) = 0x00040000; ++ *((int *)&__m256_op0[1]) = 0x00020000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffff0000; ++ *((int *)&__m256_op0[4]) = 0xffff0000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffff0000; ++ *((int *)&__m256_op0[0]) = 0xffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x55550000; ++ *((int *)&__m256_op0[6]) = 0x55550000; ++ *((int *)&__m256_op0[5]) = 0x55550000; ++ *((int *)&__m256_op0[4]) = 0x55550000; ++ *((int *)&__m256_op0[3]) = 0x55550000; ++ *((int *)&__m256_op0[2]) = 0x55550000; ++ *((int *)&__m256_op0[1]) = 0x55550000; ++ *((int *)&__m256_op0[0]) = 0x55550000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000d5000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000d5000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000d5000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000d5000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x007f8080; ++ *((int *)&__m256_op0[6]) = 0x007f007f; ++ *((int *)&__m256_op0[5]) = 0x007f8080; ++ *((int *)&__m256_op0[4]) = 0x007f007f; ++ *((int 
*)&__m256_op0[3]) = 0x007f8080; ++ *((int *)&__m256_op0[2]) = 0x007f007f; ++ *((int *)&__m256_op0[1]) = 0x007f8080; ++ *((int *)&__m256_op0[0]) = 0x007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x08e8c000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0fffc000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x08e8c000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0fffc000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftinth_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000002; ++ *((int *)&__m256_op0[4]) = 0x00000008; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int 
*)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000002; ++ *((int *)&__m256_op0[0]) = 0x00000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7f1d7f7f; ++ *((int *)&__m256_op0[6]) = 0x7f1d7f3b; ++ *((int *)&__m256_op0[5]) = 0x02020102; ++ *((int *)&__m256_op0[4]) = 0x02020102; ++ *((int *)&__m256_op0[3]) = 0x7f1d7f7f; ++ *((int *)&__m256_op0[2]) = 0x7f1d7f3b; ++ *((int *)&__m256_op0[1]) = 0x02020102; ++ *((int *)&__m256_op0[0]) = 0x02020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 
0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000102; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x39ffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x39ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x80000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x80000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x80000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int 
*)&__m256_op0[1]) = 0x80000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x000055ff; ++ *((int *)&__m256_op0[6]) = 0x01f90ab5; ++ *((int *)&__m256_op0[5]) = 0xaa95eaff; ++ *((int *)&__m256_op0[4]) = 0xfec6e01f; ++ *((int *)&__m256_op0[3]) = 0x000055ff; ++ *((int *)&__m256_op0[2]) = 0x01f90ab5; ++ *((int *)&__m256_op0[1]) = 0xaa95eaff; ++ *((int *)&__m256_op0[0]) = 0xfec6e01f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ 
*((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffeb683; ++ *((int *)&__m256_op0[6]) = 0x9ffffd80; ++ *((int *)&__m256_op0[5]) = 0xfffe97c0; ++ *((int *)&__m256_op0[4]) = 0x20010001; ++ *((int *)&__m256_op0[3]) = 0xfffeb683; ++ *((int *)&__m256_op0[2]) = 0x9ffffd80; ++ *((int *)&__m256_op0[1]) = 0xfffe97c0; ++ *((int *)&__m256_op0[0]) = 0x20010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x000000ff; ++ *((int *)&__m256_op0[6]) = 0x000000f8; ++ *((int *)&__m256_op0[5]) = 0xbc8ff0ff; ++ *((int *)&__m256_op0[4]) = 0xffffcff8; ++ *((int *)&__m256_op0[3]) = 0x000000ff; ++ *((int *)&__m256_op0[2]) = 0x000000f8; ++ *((int *)&__m256_op0[1]) = 0xbc8ff0ff; ++ *((int *)&__m256_op0[0]) = 0xffffcff8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 
0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000001; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000001; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000001; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000001; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fe37fe3; ++ *((int *)&__m256_op0[6]) = 0x001d001d; ++ *((int *)&__m256_op0[5]) = 0x7fff7fff; ++ *((int *)&__m256_op0[4]) = 0x7fff0000; ++ *((int *)&__m256_op0[3]) = 0x7fe37fe3; ++ *((int *)&__m256_op0[2]) = 0x001d001d; ++ *((int *)&__m256_op0[1]) = 0x7fff7fff; ++ *((int *)&__m256_op0[0]) = 0x7fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000010; ++ *((int *)&__m256_op0[6]) = 0x00000010; ++ *((int *)&__m256_op0[5]) = 0x00000010; ++ *((int *)&__m256_op0[4]) = 0x00000010; ++ *((int *)&__m256_op0[3]) = 0x00000010; ++ *((int *)&__m256_op0[2]) = 0x00000010; ++ *((int *)&__m256_op0[1]) = 0x00000010; ++ *((int *)&__m256_op0[0]) = 0x00000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrml_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x8b141414; ++ *((int *)&__m256_op0[4]) = 0x0e0e0e0e; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x36722a7e; ++ *((int *)&__m256_op0[0]) = 0x66972cd6; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x6a9e3f9a; ++ *((int *)&__m256_op0[4]) = 0x603a2001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x6a9e3f9a; ++ *((int *)&__m256_op0[0]) = 0x603a2001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x0000fafe; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x0000fafe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00fffefe; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0xfffffffc; ++ *((int *)&__m256_op0[4]) = 0x5556aaa8; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0xfffffffc; ++ *((int *)&__m256_op0[0]) = 0x5556aaa8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffcc80; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x7dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x002a5429; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x002a5429; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x77777777; ++ *((int *)&__m256_op0[6]) = 0xf7777777; ++ *((int *)&__m256_op0[5]) = 0xf7777777; ++ *((int *)&__m256_op0[4]) = 0x77777777; ++ *((int *)&__m256_op0[3]) = 0x77777777; ++ *((int *)&__m256_op0[2]) = 0xf7777777; ++ *((int *)&__m256_op0[1]) = 0xf7777777; ++ *((int *)&__m256_op0[0]) = 0x77777777; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000009; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000009; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000009; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x010c7fbc; ++ *((int *)&__m256_op0[6]) = 0x7e1c7e1c; ++ *((int *)&__m256_op0[5]) = 0xfe000000; ++ *((int *)&__m256_op0[4]) = 0x00000024; ++ *((int *)&__m256_op0[3]) = 0x010c7fbc; ++ *((int *)&__m256_op0[2]) = 0x7e1c7e1c; ++ *((int *)&__m256_op0[1]) = 0xfe000000; ++ *((int *)&__m256_op0[0]) = 0x00000024; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfffffe20; ++ *((int *)&__m256_op0[6]) = 0x001dfe1f; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xfffffe20; ++ *((int *)&__m256_op0[2]) = 0x001dfe1f; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffe1; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffe1; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffe1; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffe1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000040; ++ *((int *)&__m256_op0[6]) = 0x00000020; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000040; ++ *((int *)&__m256_op0[2]) = 0x00000020; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xfefefeff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xff295329; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xfefefeff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xff295329; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xff00ffff; ++ *((int *)&__m256_op0[6]) = 0xff00ffff; ++ *((int *)&__m256_op0[5]) = 0xff00ffff; ++ *((int *)&__m256_op0[4]) = 0xff00ffff; ++ *((int *)&__m256_op0[3]) = 0xff00ffff; ++ *((int *)&__m256_op0[2]) = 0xff00ffff; ++ *((int *)&__m256_op0[1]) = 0xff00ffff; ++ *((int *)&__m256_op0[0]) = 0xff00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x7fefffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x7fefffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x02020102; ++ *((int *)&__m256_op0[6]) = 0x02020102; ++ *((int *)&__m256_op0[5]) = 0x02020102; ++ *((int *)&__m256_op0[4]) = 0x02020102; ++ *((int *)&__m256_op0[3]) = 0x02020102; ++ *((int *)&__m256_op0[2]) = 0x02020102; ++ *((int *)&__m256_op0[1]) = 0x02020102; ++ *((int *)&__m256_op0[0]) = 0x02020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000001; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000001; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvftintrph_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x515f93f0; ++ *((int *)&__m256_op0[6]) = 0x23600fb9; ++ *((int *)&__m256_op0[5]) = 0x948b39e0; ++ *((int *)&__m256_op0[4]) = 0xb7405f6f; ++ *((int *)&__m256_op0[3]) = 0x48ef0878; ++ *((int *)&__m256_op0[2]) = 0x00007c83; ++ *((int *)&__m256_op0[1]) = 0x78af877c; ++ *((int *)&__m256_op0[0]) = 0x7d7f86f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000df93f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000077843; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x17171717; ++ *((int *)&__m256_op0[6]) = 0x17171717; ++ *((int *)&__m256_op0[5]) = 0x000607f7; ++ *((int *)&__m256_op0[4]) = 0x00000001; ++ *((int *)&__m256_op0[3]) = 0x17171717; ++ *((int *)&__m256_op0[2]) = 0x17171717; ++ *((int *)&__m256_op0[1]) = 0x000607f7; ++ *((int *)&__m256_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00ff00ff; ++ *((int *)&__m256_op0[6]) = 0x00ff00ff; ++ *((int *)&__m256_op0[5]) = 0x00ff00ff; ++ *((int *)&__m256_op0[4]) = 0x017e01fe; ++ *((int *)&__m256_op0[3]) = 0x017e00ff; ++ *((int *)&__m256_op0[2]) = 0x017e00ff; ++ *((int *)&__m256_op0[1]) = 0x00ff00ff; ++ *((int *)&__m256_op0[0]) = 0x017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xfefefefe; ++ *((int *)&__m256_op0[6]) = 0xfefefefe; ++ *((int *)&__m256_op0[5]) = 0xfe8bfe0e; ++ *((int *)&__m256_op0[4]) = 0xfe8bfe12; ++ *((int *)&__m256_op0[3]) = 0xfefefefe; ++ *((int *)&__m256_op0[2]) = 0xfefefefe; ++ *((int *)&__m256_op0[1]) = 0xfe8bfe0e; ++ *((int *)&__m256_op0[0]) = 0xfe8bfe12; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x71717171; ++ *((int *)&__m256_op0[6]) = 0x71010101; ++ *((int *)&__m256_op0[5]) = 0x8e8e8e8e; ++ *((int *)&__m256_op0[4]) = 0x8f00ffff; ++ *((int *)&__m256_op0[3]) = 0x71717171; ++ *((int *)&__m256_op0[2]) = 0x71010101; ++ *((int *)&__m256_op0[1]) = 0x8e8e8e8e; ++ *((int *)&__m256_op0[0]) = 0x8f00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00060000; ++ *((int *)&__m256_op0[6]) = 0x00040000; ++ *((int *)&__m256_op0[5]) = 0x00020000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00060000; ++ *((int *)&__m256_op0[2]) = 0x00040000; ++ *((int *)&__m256_op0[1]) = 0x00020000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xff1cff1c; ++ *((int *)&__m256_op0[6]) = 0xff1cff1c; ++ *((int *)&__m256_op0[5]) = 0xff1cff1c; ++ *((int *)&__m256_op0[4]) = 0xff1cff1c; ++ *((int *)&__m256_op0[3]) = 0xff1cff1c; ++ *((int *)&__m256_op0[2]) = 0xff1cff1c; ++ *((int *)&__m256_op0[1]) = 0xff1cff1c; ++ *((int *)&__m256_op0[0]) = 0xff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x000fffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x000fffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_xvftintrmh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00002262; ++ *((int *)&__m256_op0[6]) = 0x00005111; ++ *((int *)&__m256_op0[5]) = 0x0000165e; ++ *((int *)&__m256_op0[4]) = 0x0000480d; ++ *((int *)&__m256_op0[3]) = 0x00002262; ++ *((int *)&__m256_op0[2]) = 0x00005111; ++ *((int *)&__m256_op0[1]) = 0x0000165e; ++ *((int *)&__m256_op0[0]) = 0x0000480d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0xffffffff; ++ *((int *)&__m256_op0[6]) = 0xffffffff; ++ *((int *)&__m256_op0[5]) = 0xffffffff; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0xffffffff; ++ *((int *)&__m256_op0[2]) = 0xffffffff; ++ *((int *)&__m256_op0[1]) = 0xffffffff; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s 
(__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0xffffffff; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00000000; ++ *((int *)&__m256_op0[6]) = 0x00000000; ++ *((int *)&__m256_op0[5]) = 0x00000000; ++ *((int *)&__m256_op0[4]) = 0x00000000; ++ *((int *)&__m256_op0[3]) = 0x00000000; ++ *((int *)&__m256_op0[2]) = 0x00000000; ++ *((int *)&__m256_op0[1]) = 0x00000000; ++ *((int *)&__m256_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((int *)&__m256_op0[7]) = 0x00040004; ++ *((int *)&__m256_op0[6]) = 0x00040004; ++ *((int *)&__m256_op0[5]) = 0x00040005; ++ *((int *)&__m256_op0[4]) = 0x00040005; ++ *((int *)&__m256_op0[3]) = 0x00040004; ++ *((int *)&__m256_op0[2]) = 0x00040004; ++ *((int *)&__m256_op0[1]) = 0x00040005; ++ *((int *)&__m256_op0[0]) = 0x00040005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c +new file mode 100644 +index 000000000..1cf0ec698 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff6fffefffe005b; ++ *((unsigned long *)&__m256i_result[2]) = 0xffbefffefffe005a; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff6fffefffe005b; ++ *((unsigned long *)&__m256i_result[0]) = 0xffbefffefffe005a; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fffffffefffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffffffefffe; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000023; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000023; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff8fffffff8ffff; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffc000400780087; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fe80fffc0183; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffc000400f8ff87; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80ff00ff7c0183; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffc00000078; ++ *((unsigned long *)&__m256i_result[2]) 
= 0x00000000fffffffc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffc000000f8; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff790000077c; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007ff000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff1fffffff1; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000001ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000f6ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000f6ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000017f0000017f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000017f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000017f; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000017000000080; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000400010004; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c +new file mode 100644 +index 000000000..14ec081a4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000015d050192cb; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000033ff01020e23; ++ *((unsigned long *)&__m256i_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001ffaa0000040e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000716800007bb6; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001ffe80001fe9c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000228200001680; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100ab000500a0; ++ *((unsigned long *)&__m256i_result[2]) = 0x000200b800080124; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001011b000200aa; ++ *((unsigned long *)&__m256i_result[0]) = 0x00150118008f0091; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001341c4000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001000310000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007f00340040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f000000ff; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01ae00ff00ff; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_result[2]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_result[0]) = 0x007c000000810081; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffc0003fffc0; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffc0003fffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f010700c70106; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f010700c70106; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0010000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0010000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000000010000; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000002a5; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000002a5; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0e0e0e0e0e0e0e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000e0e0e0e0e0e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00010e0d00009e0e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00009000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000e0e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00009000; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000300000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000300000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000300000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000300000004; ++ __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0501030102141923; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffd5020738b43ddb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x010200023b8e4174; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff4ff4e11410b40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0xf259905a09c23be0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3a89167aeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000501e99b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000109973de7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001020f22; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001890b7a39; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007f000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff0000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001fff9fff8; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffff81ffffeb2f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f6ee0570b4e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000018de; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffb4ffcec0f1; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffff81ffffeb2f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f6ee0570b4e; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x00000000000018de; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffb4ffcec0f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001ffffeab0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000e0574abc; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000018de; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001ffcec0a5; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffe367cc82f8989a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4f90000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000082f8989a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000d58f43c8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004411; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000002362; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000175d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000002362; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000175d; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000ff00; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000100003ffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100003fcd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100003ffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100003fcd; ++ __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf3f3f3f3f3f3f4f3; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf3f3f3f3f3f3f4f3; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000001ce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000001fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000001ce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001fd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x00000000000001fd;
++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0xfd12fd12fd12fd12;
++ *((unsigned long *)&__m256i_op0[2]) = 0xfd12fd12fd12fd12;
++ *((unsigned long *)&__m256i_op0[1]) = 0xfd12fd12fd12fd12;
++ *((unsigned long *)&__m256i_op0[0]) = 0xfd12fd12fd12fd12;
++ *((unsigned long *)&__m256i_op1[3]) = 0xfd02fd02fd02fd02;
++ *((unsigned long *)&__m256i_op1[2]) = 0xfd02fd02fd02fd02;
++ *((unsigned long *)&__m256i_op1[1]) = 0xfd02fd02fd02fd02;
++ *((unsigned long *)&__m256i_op1[0]) = 0xfd02fd02fd02fd02;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++ *((unsigned long *)&__m256i_result[2]) = 0xfa15fa15fa15fa14;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++ *((unsigned long *)&__m256i_result[0]) = 0xfa15fa15fa15fa14;
++ __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
+new file mode 100644
+index 000000000..fa4d5fd6f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
+@@ -0,0 +1,620 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++ long int long_op0, long_op1, long_op2, lont_out, lont_result;
++ long int long_int_out, long_int_result;
++ unsigned int unsigned_int_out, unsigned_int_result;
++ unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fffefe;
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000fffefe;
++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000;
++ *((unsigned long
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffb80000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffb80000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) 
= 0x000000000000ffff; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000f0f0003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000f1003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000f0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000011; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001fffffff9; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100002000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00b7003600120000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00b7006200fc0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00b7004100190004; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9a7f997fff01ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbe632a4f1c3c5653; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffe54affffffd3; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffcfae000000d8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00006681000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffd668ffffa9c6; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff00000bff00000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff00000bff00000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffbff1ffffbff1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffbff1ffffbff1; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff1fffffff1; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000051; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000000fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000051; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000000fff; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8180ffff8181; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff8180ffff8181; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00feff0100feff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00feff0100feff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffd017d00; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe00; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffc0c0ffffbfc0; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffeffff10000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeffff10000000; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff8579f; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0007a861; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c +new file mode 100644 +index 000000000..87c3e25b1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff70ff01ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff70ff01ff80; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_result[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_result[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_result[0]) = 0x00c200c200c200bb; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff70; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff70; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00bb; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0057; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff00bb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0057; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffa003e; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffb009c; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffa003e; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffb009c; ++ __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6300000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffc001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000c000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffc001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000c000; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000018ffff2b13; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000018ffff2b13; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00800080ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00800080ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007fe268; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x00000000ffff001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007fe268; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffbfffc; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffff00; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffefefeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff295329; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff01010101; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00d6acd7; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff01010101; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00d6acd7; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x120e120dedf1edf2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x120e120dedf1edf2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000120e120d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x00000000120e120d; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000000d; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0048007f002f0028; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x004a007f002f0028; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x24342434ffff2435; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x24342434ffff2435; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffba8300004fc2; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffebffffffebfff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffebffffffebfff; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff3eedffff3ee3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff3eedffff3ee3; ++ __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c +new file mode 100644 +index 000000000..5a047a508 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xbff0800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xbff0800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000307fffe72e800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020200008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0008010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5555555580000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5555555580000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x555555553f800000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x419cd5b11c3c5654; ++ *((unsigned long *)&__m256i_op1[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x419cd5b11c3c5654; ++ *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[2]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x6580668200fe0002; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000004000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff04ff00ff00ff00; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000003f00390035; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8015003f0006001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000003f00390035; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8015003f0006001f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000001529c1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80007073cadc3779; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000001529c1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80007073cadc3779; ++ *((unsigned long *)&__m256i_result[3]) = 0x00008000003f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00390015003529c1; ++ *((unsigned long *)&__m256i_result[1]) = 0x00008000003f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00390015003529c1; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020002000200020; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000002c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000002c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000002c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000002c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000002c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000002c0000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7eeefefefefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7eeefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7e00ee00fe00fe00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe00fe00fe00fe00; ++ *((unsigned long *)&__m256i_result[1]) = 0x7e00ee00fe00fe00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe00fe00fe00fe00; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xaad5555500000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xaad5555500000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001fff200007ef; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff7bfffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff80007fe9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff7bfffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff80007fe9; ++ *((unsigned long *)&__m256i_result[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_result[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x407b40ff40ff40f1; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffa; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00018069; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffa; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00018069; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff01fffffffaff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff01fffffffaff; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001ff800000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xd8d8c00000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001ff800000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xd8d8c00000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080ff0080; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000001ff03ff; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000011ffd97c3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000011ffd97c3; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000019ffdf403; ++ __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe0ffe000000000; ++ __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c +new file mode 100644 +index 000000000..4393045c3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, 
long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0000fffe0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000fefc0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffe0000; ++ __m256i_out 
= __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefdfffffefd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f007f78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f00007f7f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f00fffb7f78fffc; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808081; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8080808080808081; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000808000008080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000808000008081; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff01fffffffeff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff01fffffffeff; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x07efefefefefefee; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00f300ff00f3; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00f300ff00f3; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00f300ff00f3; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00f300ff00f3; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00fe00fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00fe00fe; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007c000000810081; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007c7fff00007fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00817fff00810000; ++ *((unsigned long *)&__m256i_result[1]) = 0x007c7fff00007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00817fff00810000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000001d001d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001d; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000070007000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe070e000e070e000; ++ *((unsigned long *)&__m256i_result[2]) = 0xe070e000e070e000; ++ *((unsigned long *)&__m256i_result[1]) = 0xe070e000e070e000; ++ *((unsigned long *)&__m256i_result[0]) = 0xe070e000e070e000; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x003f003f003f0040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f00004040; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffe98; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000e000e; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000a0080000b00; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000a0080000b00; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000a0080000b00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000a0080000b00; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3f00c0003f00c000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3f00c0003f00c000; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[0]) = 0x4980008068400000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf000f000f000f000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf000f010f000f010; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf000f000f000f000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf000f010f000f010; ++ *((unsigned long *)&__m256i_result[3]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0ff00fff0ff10; ++ __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffed; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffed; ++ __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c +new file mode 100644 +index 000000000..ce28c4857 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c +@@ -0,0 +1,272 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, 
int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x146014141414146e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf19998668e5f4b84; ++ long_op1 = 0x0000007942652524; ++ *((unsigned long *)&__m256i_result[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007942652524; ++ *((unsigned long *)&__m256i_result[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_result[0]) = 0xf19998668e5f4b84; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000; ++ int_op1 = 0x00000045eef14fe8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202020201010000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x012e2110012e2110; ++ int_op1 = 0x00000000000000ac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ac; ++ *((unsigned long *)&__m256i_result[0]) = 0x012e2110012e2110; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff800000ff800000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff0000ff; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff0000ff; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff383efffedf0c; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff383e000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xe800c000fffeeece; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0020000000200000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000048; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000048; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff7fffffff7; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff700000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff7fffffff7; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ int_op1 = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000000000001e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001e00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c +new file mode 100644 +index 000000000..644d2ce4b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c +@@ -0,0 +1,380 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefe00000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001f00e0ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001f00e0ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000200000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000739c; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op1[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op1[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_result[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x6040190d00000000; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000ffff0000ffff; ++ __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff8001ffff0001; ++ __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c +new file mode 100644 +index 000000000..c1eda6c6c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c +@@ -0,0 +1,86 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_result[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_result[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvld ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_result[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_result[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvldx ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb; ++ __m256i_out = __lasx_xvldrepl_b ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xfeebfeebfeebfeeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xfeebfeebfeebfeeb; ++ __m256i_out = __lasx_xvldrepl_h ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0xad72feebad72feeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xad72feebad72feeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xad72feebad72feeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xad72feebad72feeb; ++ __m256i_out = __lasx_xvldrepl_w ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb; ++ __m256i_out = __lasx_xvldrepl_d ((unsigned long *)&__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c +new file mode 100644 +index 000000000..84b3c6599 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvldi (-4080); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0xfebcfebcfebcfebc; ++ *((unsigned long *)&__m256i_result[2]) = 0xfebcfebcfebcfebc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfebcfebcfebcfebc; ++ *((unsigned long *)&__m256i_result[0]) = 0xfebcfebcfebcfebc; ++ __m256i_out = __lasx_xvldi (1724); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_result[3]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fd1000000000000; ++ __m256i_out = __lasx_xvldi (-943); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvldi (1820); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7200000072000000; ++ __m256i_out = __lasx_xvldi (-3214); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff1dffffff1d; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff1dffffff1d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffff1dffffff1d; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff1dffffff1d; ++ __m256i_out = __lasx_xvldi (2845); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvldi (-4080); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fd1000000000000; ++ __m256i_out = __lasx_xvldi (-943); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_result[3]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7200000072000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7200000072000000; ++ __m256i_out = __lasx_xvldi (-3214); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c +new file mode 100644 +index 000000000..105567951 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c +@@ -0,0 +1,16 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler-times "xvldrepl.w" 2} } */ ++ ++#define N 258 ++ ++float a[N], b[N], c[N]; ++ ++void ++test () ++{ ++ for (int i = 0; i < 256; i++) ++ { ++ a[i] = c[0] * b[i] + c[1]; ++ } ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c +new file mode 100644 +index 000000000..f9634b128 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c +@@ -0,0 +1,742 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, 
__m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x34ec5670cd4b5ec0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4f111e4b8e0d7291; ++ *((unsigned long *)&__m256i_op1[1]) = 0xeaa81f47dc3bdd09; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0e0d5fde5df99830; ++ *((unsigned long *)&__m256i_op2[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x84bd087966d4ace0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x26aa68b274dc1322; ++ *((unsigned long *)&__m256i_op2[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long *)&__m256i_result[3]) = 0x044819410d87e69a; ++ *((unsigned long *)&__m256i_result[2]) = 0x21d3905ae3e93be0; ++ *((unsigned long *)&__m256i_result[1]) = 0x5125883a30da0f20; ++ *((unsigned long *)&__m256i_result[0]) = 0x6d7b2d3ac2777aeb; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_result[2]) 
= 0xfffffffffffffeff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff1f; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffeff; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fffe00010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fffe00010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000002e0000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000002e0000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[2]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[0]) = 0x000607f700000001; ++ __m256i_out = __lasx_xvmadd_w 
(__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x370036db92c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x371462137c1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x800000fe7e02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x371c413b999d04b5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffff00ff00ffff00; ++ *((unsigned long *)&__m256i_op2[2]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff00000000ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x37fe365b920d007e; ++ *((unsigned long *)&__m256i_result[2]) = 0x381462137d1e0149; ++ *((unsigned long *)&__m256i_result[1]) = 0x80ff00fe7e020060; ++ *((unsigned long *)&__m256i_result[0]) = 0x381c413b99cd04dd; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op2[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op2[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op2[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_result[3]) = 0xd100645944100004; ++ *((unsigned long *)&__m256i_result[2]) = 0xd1908469108400d1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000404040104; ++ *((unsigned long *)&__m256i_result[0]) = 0xd1108199714910f9; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x61f1000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0108000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x61f1a18100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0108000000000000; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000055555555; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000055555555; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000004; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2aaaaaaa2aaaaaab; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2aaaaaaa2aaaaaab; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_result[0]) = 0x7c007c007c007c00; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fd00ffff02fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00007f7f00007f00; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffa80000ff31; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0101010127272525; ++ *((unsigned long *)&__m256i_op2[2]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0101010127272525; ++ *((unsigned long *)&__m256i_op2[0]) = 0x23a2a121179e951d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fefffffffffffff; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = 
__lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x008e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007000008e700000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007000008e700000; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op2[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op2[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op2[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000100000001; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00009fff00002001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00009fff00002001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001497c98ea4fca; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001497c98ea4fca; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000006715b036; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000006715b036; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007f80; ++ __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe0ffe000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fa0001fff808000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe0ffe000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fa0001fff808000; ++ __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c +new file mode 100644 +index 000000000..6238685bc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c +@@ -0,0 +1,856 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000100000000; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0xfff8fff8fff8fff8; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003f78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003f78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000003f78; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003f78; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002001800ff0078; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01f8007001f80070; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002001800ff0078; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01f8007001f80070; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_result[3]) = 0x00300b40fc001678; ++ *((unsigned long *)&__m256i_result[2]) = 0xfc00000000001f80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00300b40fc001678; ++ *((unsigned long *)&__m256i_result[0]) = 0xfc00000000001f80; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe8440000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe8440000; ++ __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffefffefffefffef; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xebfd15f000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01700498ff8f1600; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf520c7c024221300; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00802fd0ff540a80; ++ *((unsigned long *)&__m256i_op1[3]) = 0xebfd15f000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01700498ff8f1600; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf520c7c024221300; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00802fd0ff540a80; ++ *((unsigned long *)&__m256i_op2[3]) = 0xf96d674800000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x14187a7822b653c0; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long *)&__m256i_result[3]) = 0xebfd15f000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x015c6a7facc39600; ++ *((unsigned long *)&__m256i_result[1]) = 0xfa070a51cbd95300; ++ *((unsigned long *)&__m256i_result[0]) = 0x00c7463075439280; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0555550000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0555550000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_result[3]) = 0x0555550000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0555550000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80938013800d0005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00001fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00001fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_result[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_result[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x80938013800d0005; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op1[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op1[0]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_result[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_result[0]) = 0x556caad9aabbaa88; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff7f7f7fff7fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff7f7f7fff7fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3f7f7f7eff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3f7f7f7eff800000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffeffffffdd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x002affaa00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ 
*((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffffdd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffdc; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001000b000b; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001000b000b; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[3]) = 0x2020080800000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000004044f4f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0ef11ae55a5a6767; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_result[2]) = 0x6040190d20227a78; ++ *((unsigned long *)&__m256i_result[1]) = 0x132feeabd2d33b38; ++ *((unsigned long *)&__m256i_result[0]) = 0x6040190d00000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x3); ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000fe0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000fe0000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1086658a18ba3594; ++ *((unsigned long *)&__m256i_op0[2]) = 0x160fe9f000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1086658a18ba3594; ++ *((unsigned long *)&__m256i_op0[0]) = 0x160fe9f000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614f61; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000616100004f61; ++ *((unsigned long *)&__m256i_result[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_result[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_result[0]) = 0x4df5b1a3ed5e02c1; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000100000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x01fffffffe000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe00000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x817f11ed81800ff0; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000ffffff; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 
0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c +new file mode 100644 +index 000000000..5fa080375 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c +@@ -0,0 +1,723 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, 
__m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f7f7f80; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00003cfc0000006f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00003cfc0000006f; ++ *((unsigned long *)&__m256i_result[3]) = 0x02007f8002000400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000c5dc02005f64; ++ *((unsigned long *)&__m256i_result[1]) = 0x02007f8002000400; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000c5dc02005f64; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000700020004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000700020004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000070002000a; ++ __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dfffc000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dfffdfffc00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001fbf9fbe29f52; ++ *((unsigned long *)&__m256i_op2[2]) = 0x5b409c0000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001fbf9fbe29f52; ++ *((unsigned long *)&__m256i_op2[0]) = 0x5b409c0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_result[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000403f3fff; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40effc0000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40effc0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00010003fc827a86; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f7f7f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f017fc0ddbf7d86; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00153f1594ea02ff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff15c1ea95ea02ff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xc06e7c817f7e8081; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000bd3f016f177a; ++ *((unsigned long *)&__m256i_result[1]) = 0xc06e7c8100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x60c485800178147a; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000011f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000011f; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000192540; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000192540; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff88ff88; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffdf5b000041b0; ++ __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000fb8000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fb8000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x807f807f00000380; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007380; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc03fc03f000001c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000001c0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[3]) = 0x807f807f00000380; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007380; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03fc03f000001c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001c0; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80ff00ff80ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x007f00ff007f00fe; ++ *((unsigned long *)&__m256i_op2[2]) = 0xf711ee11f711ee91; ++ *((unsigned long *)&__m256i_op2[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xf711ee11f711ee11; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80ff00ff80ff01; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000045ff740023; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000045ff740023; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000155b200; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000b70000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c +new file mode 100644 +index 000000000..40549448e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c +@@ -0,0 +1,940 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0003ff540000081c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0003ffd00003fd38; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001ffaa0000040e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000716800007bb6; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001ffe80001fe9c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000228200001680; ++ *((unsigned long *)&__m256i_op2[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op2[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_result[3]) = 0x002e4db200000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000315ac0000d658; ++ *((unsigned long *)&__m256i_result[1]) = 
0x00735278007cf94c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003ed8800031b38; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0001ff04; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff02a0fefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000cfefd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffbfefa; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1eff1902a0fea4; ++ *((unsigned long *)&__m256i_result[1]) = 0xff10000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff10fff9ff13fd17; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, 
__m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfafafafafafafafa; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fefefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xf9fbf9fbf9fbf9fb; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xfdfffdfffdfffdff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01fffffdff; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ef32; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010100f10100fd4; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001f001fffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffe0ffe000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x34ec5670cd4b5ec0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4f111e4b8e0d7291; ++ *((unsigned long *)&__m256i_op0[1]) = 0xeaa81f47dc3bdd09; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0e0d5fde5df99830; ++ *((unsigned long *)&__m256i_op1[3]) = 0x67390c19e4b17547; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbacda0f96d2cec01; ++ *((unsigned long *)&__m256i_op1[1]) = 0xee20ad1adae2cc16; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5a2003c6a406fe53; ++ *((unsigned long *)&__m256i_op2[3]) = 0x80c72fcd40fb3bc0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x84bd087966d4ace0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x26aa68b274dc1322; ++ *((unsigned long *)&__m256i_op2[0]) = 0xe072db2bb9d4cd40; ++ *((unsigned long *)&__m256i_result[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_result[2]) = 0x5464fbfc416b9f71; ++ *((unsigned long *)&__m256i_result[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d8264202b8ea3f0; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff000000ffffff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffffff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fa022a01a401e5; ++ *((unsigned long *)&__m256i_op1[2]) = 0x030d03aa0079029b; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x024c01f901950261; ++ *((unsigned long *)&__m256i_op1[0]) = 0x008102c2008a029f; ++ *((unsigned long *)&__m256i_op2[3]) = 0x002e4db200000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000315ac0000d658; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00735278007cf94c; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0003ed8800031b38; ++ *((unsigned long *)&__m256i_result[3]) = 0x01a72334ffff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff4f6838ff937648; ++ *((unsigned long *)&__m256i_result[1]) = 0x00a2afb7fff00ecb; ++ *((unsigned long *)&__m256i_result[0]) = 0xffce110f004658c7; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003a099512; ++ *((unsigned long *)&__m256i_op0[1]) = 0x280ac9da313763f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe032c738adcc6bbf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000fffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003a099512; ++ *((unsigned long *)&__m256i_result[1]) = 0x280ac9da313763f5; ++ *((unsigned long *)&__m256i_result[0]) = 0xe032c738adcc6bbf; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010003; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001f0000ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000060008; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000000c005b; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffffe0000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000040053; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0007fff7; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffff005affa4; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000053ffac; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000420080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5fff5fff607f0000; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100004300000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100004300000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op2[2]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x01ffff4300ffff00; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffb8579f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffdbbbcf; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffb8579f; ++ __m256i_out ++ = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_result[2]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00b200b300800080; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000404040; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1fa0000000080000; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out ++ = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202020201010000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe928f1313c9cc; ++ *((unsigned long *)&__m256i_result[0]) = 0x4244020201010000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3f; ++ *((unsigned long *)&__m256i_op2[0]) = 0x3f3f3f3f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_result[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_result[0]) = 0x8787878a00000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8787878a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe3; ++ *((unsigned long *)&__m256i_result[2]) = 0x63636344c3c3c4f6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffc3; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3c3c500fffffff6; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008; ++ *((unsigned 
long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffbfffcffeffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffbfffcffeffff0; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op2[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op2[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0018761ed60b5d7f; ++ *((unsigned long *)&__m256i_result[2]) = 0xabdcdc9938afafe9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0018761ed60b5d7f; ++ *((unsigned long *)&__m256i_result[0]) = 0xabdcdc9938afafe9; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c +new file mode 100644 +index 000000000..683876933 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c +@@ -0,0 +1,742 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1,
__m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op2[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0x1031146010201020; ++ *((unsigned long *)&__m256i_result[2]) = 0x1020102010201020; ++ *((unsigned long *)&__m256i_result[1]) = 0x1031146010201020; ++ *((unsigned long *)&__m256i_result[0]) = 0x1020102010201020; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfff8fffffff8ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x94d7fb5200000000; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020000010201; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000020000010201; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020000010201; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000020000010201; ++ __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op2[2]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op2[0]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000017e; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3fc6c68787; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f87870000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[2]) = 
0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003e3ec6c68686; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffeff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003e3e87870000; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_op0[2]) = 0x019d00a2003a0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_op0[0]) = 0x019d00a2003a0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[2]) = 0x019d00a20039fff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fe007a01c40110; ++ *((unsigned long *)&__m256i_result[0]) = 0x019d00a2003a0000; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000002780; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f3280000dfff; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff90ffffff80; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3fffffffff7f0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3fffffffff7f0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000c7aff7c00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000c7aff7c00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000002030000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x030303670101fd90; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000002030000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x030303670101fd90; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3ffffffffc7bfc99; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3ffffffffc7bfc99; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0200000202000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0200000202000002; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0001000100010000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x020afefb08140000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf800f7fff8ffc0ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xf8fff7fff7ffa000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800e000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c +new file mode 100644 +index 000000000..f9f88b654 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c +@@ -0,0 +1,799 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, 
long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff80008000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x074132a240000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x06f880008000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080008000b8f1; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000c0; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000012481e4950; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000001658166830; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1f60010000080100; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1f60010000080100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f60010000080100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f60010000080100; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffeffed; ++ *((unsigned long *)&__m256i_op2[3]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_result[2]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_result[1]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_result[0]) = 0xbf3ffffffffeffed; ++ __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ff80100ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ff80100ffffffff; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x34000000fff00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3380000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffb7146213; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffc1e0049; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffb71c413b; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xf3317da580000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x34000000fff00000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff6e00000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3380000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x363c0000fff3c000; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfff0000000000000; ++ 
*((unsigned long *)&__m256i_op2[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe000ffffffffff; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_op2[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op2[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_op2[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op2[0]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ce3c0050d32d40; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fadafc013acf600; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ce3c0050d32d40; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fadafc013acf600; ++ __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffefdfffffefd; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000100; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffff7d80000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000100; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000001fdfffffe02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff01fefffeff02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000001fdfffffe02; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000001fefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff01fefffeff02; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffeeffaf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffeeffaf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffcffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0febedc9bb95dd8f; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffcffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0febedc9bb95dd8f; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000545400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000545400; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff7bfffff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff80007fe9; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff7bfffff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff80007fe9; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010511c54440437; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010511c54440437; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000103fca1bd; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000103fca1bd; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000103fca1bd; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000103fca1bd; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010511c54440438; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010511c54440438; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000045ff740023; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000045ff740023; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000155b200; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000b70000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c +new file mode 100644 +index 000000000..5210e4cf9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c +@@ -0,0 +1,820 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000b8f81b8c850f4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000b8f81b8c850f4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000b8f81b8c850f4; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000b8f81b8c850f4; ++ *((unsigned long *)&__m256i_result[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_result[2]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_result[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_result[0]) = 0x000b2673a90896a4; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc600000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x7fff7fff000003c0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff000003c0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7c030000ffc4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7c030000ffc4; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0007a861; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbff0000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0002fffeffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0002fffeffff; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000627; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x437f201f201f2020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x037f201f001f2020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x437f201f201f2020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x037f201f001f2020; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x21bb481000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01bf481000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x21bb481000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01bf481000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op2[0]) = 
0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000ffffff1dff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000ffffff1dff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff8001ffff0001; ++ __m256i_out ++ = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe40; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffffdd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffdc; ++ __m256i_out ++ = 
__lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202020201010000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op2[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe928f1313c9cc; ++ *((unsigned long *)&__m256i_result[0]) = 0x4244020201010000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3f; ++ *((unsigned long *)&__m256i_op2[0]) = 0x3f3f3f3f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_result[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_result[0]) = 0x8787878a00000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffff6; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op2[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8787878a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe3; ++ *((unsigned long *)&__m256i_result[2]) = 0x63636344c3c3c4f6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffc3; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3c3c500fffffff6; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffbfffcffeffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffbfffcffeffff0; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op2[2]) = 0x04f104f104f504ed; ++ *((unsigned 
long *)&__m256i_op2[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op2[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0018761ed60b5d7f; ++ *((unsigned long *)&__m256i_result[2]) = 0xabdcdc9938afafe9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0018761ed60b5d7f; ++ *((unsigned long *)&__m256i_result[0]) = 0xabdcdc9938afafe9; ++ __m256i_out ++ = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c +new file mode 100644 +index 000000000..96c6671f2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f0000007f000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5900000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x5900000000000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ffce20; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ffce20; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ee1100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000004560408; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ee1100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000004560408; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000004560420; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f433c78; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffce; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000400010004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000e0001000e; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080000000000002; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1090918800000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1090918800000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c80780000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffebeeaaefafb; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffebeeaaefafb; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op0[0]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff00007fff0000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; 
++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op1[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_op1[0]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_result[2]) = 0x556caad9aabbaa88; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004a557baac4; ++ *((unsigned long *)&__m256i_result[0]) = 0x556caad9aabbaa88; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001; ++ __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c +new file mode 100644 +index 000000000..38f2c0afe +--- /dev/null 
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c +@@ -0,0 +1,560 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffa80000ff31; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x817f11ed81800ff0; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff; ++ __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffeeffaf; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010100f10100fd4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffeeffaf; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010100f10100fd4; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc600000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffd8ffc7ffffdf0d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffd8ffc7ffffdf0d; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f433c78; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff97a2; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff97a2; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000070002000a; ++ __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc7418a023680; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff8845bb954b00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc7418a023680; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff8845bb954b00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000002a5429; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffff08a7de0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffff07c4170; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff08a7de0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffff07c4170; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffff08a7de0; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffff07c4170; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffff08a7de0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffff07c4170; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c +new file mode 100644 +index 000000000..e804a0a45 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c +@@ -0,0 +1,471 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffd10000006459; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000441000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000040400000104; ++ *((unsigned long *)&__m256i_result[3]) = 0x0f0f0f0f0f0f6459; ++ *((unsigned long *)&__m256i_result[2]) = 0x0f0f44100f0f0f0f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0f0f0f0f0f0f0f0f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0f0f0f0f0f0f0f0f; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8080808180808093; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80808081808080fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8080808180808093; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80808081808080fb; ++ *((unsigned long *)&__m256i_result[3]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256i_result[2]) = 0xf5f5f5f5f5f5f5fe; ++ *((unsigned long *)&__m256i_result[1]) = 0xf5f5f5f5f5f5f5f5; ++ *((unsigned long *)&__m256i_result[0]) = 0xf5f5f5f5f5f5f5fb; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[2]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[1]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[0]) = 0x0909090909090909; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a7f0a0a0a; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[2]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[0]) = 0x0707070707070707; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[3]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[1]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2a2a2a2a2a; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0c0c0c0c0c0c0c0c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0c0c0c0c0c0c0c0c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0c0c0c0c0c0c0c0c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0c0c0c0c0c0c0c0c; ++ __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff90000fff9fff9; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000; ++ 
__m256i_out = __lasx_xvmaxi_h (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00080008000801ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[1]) = 0x00080008000801ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000800080008; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c9; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000008000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000008000165a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0009000900090009; ++ *((unsigned long *)&__m256i_result[2]) = 0x000900090009165a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0009000900090009; ++ *((unsigned long *)&__m256i_result[0]) = 0x000900090009165a; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[3]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[2]) = 0x000a000a000a000a; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m256i_result[0]) = 0x000a000a000a000a; ++ __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000401000000; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0110000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0110000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0110000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0110000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0110000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0110000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0110000000000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0110000000000080; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000e0000000e; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff400000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000900000009; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000081f20607a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000081f20607a; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000004560420; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff5; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff5; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007b007e; ++ __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c +new file mode 100644 +index 000000000..b6b34063c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c +@@ -0,0 +1,504 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a0a0a0a0a; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[2]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[1]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_result[0]) = 0x1717171717171717; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1616161616161616; ++ *((unsigned long *)&__m256i_result[2]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe16167f161616; ++ *((unsigned long *)&__m256i_result[0]) = 0x161616167fffffff; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000feb60000b7d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000feb60000c7eb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000feb60000b7d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000feb60000c7eb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0707feb60707c7eb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0707feb60707c7eb; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[2]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m256i_result[0]) = 0x1111111111111111; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_result[3]) = 0x1818ffff1818ffa3; ++ *((unsigned long *)&__m256i_result[2]) = 0x181818181818185a; ++ *((unsigned long *)&__m256i_result[1]) = 0x1818ffff1818ffa3; ++ *((unsigned long *)&__m256i_result[0]) = 0x181818181818185a; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x1c1c1c1c1c1c1c1c; ++ __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xeffc000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf064c6098d214127; ++ *((unsigned long *)&__m256i_op0[1]) = 0xeffc000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf064c6098d214127; ++ *((unsigned long *)&__m256i_result[3]) = 0xeffc001800180018; ++ *((unsigned long *)&__m256i_result[2]) = 0xf064c6098d214127; ++ *((unsigned long *)&__m256i_result[1]) = 0xeffc001800180018; ++ *((unsigned long *)&__m256i_result[0]) = 0xf064c6098d214127; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007000700070007; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_result[2]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_result[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0018001800180018; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0017001700176d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0017001700176d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0017001700176d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0017001700176d6d; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00011; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00011; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001400000014; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000e00000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000e00000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000e00000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000e00000080; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001b00fd0000; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000c7aff7c00; ++ *((unsigned long *)&__m256i_result[2]) = 
0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000c7aff7c00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffd017d00; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001f0000ffff; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000300000003; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1010101000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x101010100000000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001e0007ffff; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000004000000fd; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000004000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x000000000000001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001700000017; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001700000017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001700000017; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_result[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_result[0]) = 0x07fed3c8f7ad28d0; ++ __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001e; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001e; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[0]) = 0x1c1b1a191c1b1a19; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000001c; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001c; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000001c; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001c; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x12); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000b; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000013; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000014; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000014; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000014; ++ __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c +new file mode 100644 +index 000000000..7dbf335c1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c +@@ -0,0 +1,575 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8001000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000800080000728; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8001800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff80008000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080008000b8f1; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000180007fe8; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[3]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_result[2]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_result[1]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_result[0]) = 0x8800c800c800c801; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffff8000; ++ __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x003ff18080010201; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x003ff18080010201; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000d24; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fffe; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff81ff7d; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000004040104; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffd1108199; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000714910f9; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffd10000006459; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000441000000004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000040400000104; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffd10000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffd1108199; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000104; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff02ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x00000000ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffe1ffe0; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf000f00000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf000f00000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0xf000f00000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf000f00000000001; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007c000000810081; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4545454545454545; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffbfffffffb; ++ __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c +new file mode 100644 +index 000000000..9eaa0e9e7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c +@@ -0,0 +1,680 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f017f807f017d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f017f807f017f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000017f0000017f; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f70000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f70000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002080100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002080100; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000001de2dc20; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000001de2dc20; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000010100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffe651bfff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000001000100; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1d1d1d1ddd9d9d1d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1d1d1d1d1d1d1d1d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1d1d1d1d046fdd1d; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001d1d00001d1d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001d1d00007f79; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001d1d00001d1d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001d1d0000dd1d; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000000000000; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00011ffb0000bee1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00011ffb0000bee1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001010600000106; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001010600000106; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffd5d5ffffd5d6; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffd5d5ffffd5d6; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc58a0a0a07070706; ++ *((unsigned long *)&__m256i_op1[2]) = 0x006b60e4180b0023; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1b39153f334b966a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf1d75d79efcac002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x006b60e40e0e0e0e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x36722a7e66972cd6; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000101ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00010e0d00009e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00009000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000e0e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00009000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000033; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400100004001; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400100004001; ++ __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x60f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x60f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010183f9999b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01010101d58f43c9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010183f9999b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x01010101d58f43c9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f00ff007f00ff; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x000000000001fffe; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c +new file mode 100644 +index 000000000..01aabada8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf96d674800000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x14187a7822b653c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long *)&__m256i_result[3]) = 0xf90c0c0c00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ca40c0c0c0c0cc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0c0c0c0c0cb60cc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfbe0b80c960c96d0; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010bfc80010bf52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff1bfca0011bfcb; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0010bfc80010bf52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff1bfca0011bfcb; ++ *((unsigned long *)&__m256i_result[3]) = 0xf5f5bfc8f5f5bff5; ++ *((unsigned long *)&__m256i_result[2]) = 0xf5f1bfcaf5f5bfcb; ++ *((unsigned long *)&__m256i_result[1]) = 0xf5f5bfc8f5f5bff5; ++ *((unsigned long *)&__m256i_result[0]) = 0xf5f1bfcaf5f5bfcb; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[2]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[0]) = 0xf8f8f8f8f8f8f8f8; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000aaabffff; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff47b4ffff5878; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000b84b0000a787; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff47b4ffff5878; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000b84b0000a787; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff07b4ffff0707; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000b8070000a787; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff07b4ffff0707; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000b8070000a787; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[2]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[0]) = 0xf3f3f3f3f3f3f3f3; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc30e0000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc30e0000ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3030000ff800000; ++ __m256i_out = __lasx_xvmini_b (__m256i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff6fff6fff6fff6; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002ffff0002ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002ffff0002ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvmini_h 
(__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f7bc0001f7bd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000f93b0000017c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f7bc0001f7bd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000f93b0000017b; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff2f7bcfff2f7bd; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff2f93bfff2fff2; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff2f7bcfff2f7bd; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff2f93bfff2fff2; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff9fff9fff9fff9; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff3fff3fff3fff3; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff2fff2fff2fff2; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff2fff2fff2fff2; ++ *((unsigned long *)&__m256i_result[1]) = 
0xfff2fff2fff2fff2; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff2fff2fff2fff2; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_h (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff2fffffff2; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff2fffffff2; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff2fffffff2; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff2fffffff2; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000010000000a; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff8fffffff8; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff7fffffff7; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff0fffffff0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff0fffffff0; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_w (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x327f010101010102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x327f010101010102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff4; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff007f007f00; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_d (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c +new file mode 100644 +index 000000000..8eb7d9355 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c +@@ -0,0 +1,284 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long *)&__m256i_result[2]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long *)&__m256i_result[1]) = 0x1b1b1b1b1b1b1b1b; ++ *((unsigned long *)&__m256i_result[0]) = 0x1b1b1b1b1b1b1b1b; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e1e1e0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e1e1e0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e1e1e0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e1e1e0000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a00000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008001c0010001c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008001c0010001c; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007000000000000; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001700170017; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000001; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffe400000707; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000af100001455; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffe400000707; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000af100001455; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff61010380; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff61010380; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000006; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c +new file mode 100644 +index 000000000..6f34f6ffc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c +@@ -0,0 +1,395 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvmod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8001b72e0001b72e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8001b72eaf12d5f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000247639d9cb530; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8001b72eaf12d5f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0xff81ffe50001ffe5; ++ *((unsigned long *)&__m256i_result[2]) = 0xff81ffe5ffa6ffc6; ++ *((unsigned long *)&__m256i_result[1]) = 0x000200aafe9affe5; ++ *((unsigned long *)&__m256i_result[0]) = 0xff81ffe5ffa6ffc6; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x80008000b70fb810; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3c0f3c0f3911b910; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80008000b70fb810; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3c0f3c0f3911b910; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000781e0000f221; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000781e0000f221; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101000101010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000101010001; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000800080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc9d8080067f50020; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc70000020000c000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000010100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000001000100; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff3cff3cff3cff3c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000014; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000008e7c00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000067751500; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000008e7c00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000067751500; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0xffefffefffefffef; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000e0001000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c +new file mode 100644 +index 000000000..d0a9e9d2f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c +@@ -0,0 +1,410 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, 
long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e18000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0909090909090909; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffe000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffe000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000e000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000e000; ++ __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df8d7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8b470f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x081abb9d36ee1037; ++ *((unsigned long *)&__m256i_result[2]) = 0x1617eb17129bfd38; ++ *((unsigned long *)&__m256i_result[1]) = 0x081abb9d36ee1037; ++ *((unsigned long *)&__m256i_result[0]) = 0x1617eb17129bfd38; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001005500020000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000005500000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001005500020000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000100010001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000100010001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000005400000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000005400000002; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f0000ff807f81; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f0000ff807f81; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff8000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000095120000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc9da000063f50000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001000000; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c +new file mode 100644 +index 000000000..15e66ae38 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c +@@ -0,0 +1,86 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fafe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fafe; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmskgez_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c +new file mode 100644 +index 000000000..53b21f98b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c +@@ -0,0 +1,373 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3922d40000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000c85221c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7ebfab800000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f20; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000009f0; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000022; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w (__m256i_op0); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1f9d9f9d1f9db29f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1f9d9f9d201cb39e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x201c9f9d201cb29f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1f9d9f9d201cb39e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007773; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003373; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00630064004bffd0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000b8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000b8; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000022; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000088; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000088; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x296e000018170000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x296e000018170000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffc000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffeff000c057c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffc000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffeff000c057c; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000f0f0; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffb2f600006f48; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000008c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000008c; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000cc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000cc; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5); ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000054; ++ __m256i_out = __lasx_xvmskltz_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmskltz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c +new file mode 100644 +index 000000000..81865fd32 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c +@@ -0,0 +1,163 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0020002000400040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000005555; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000005555; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004411; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000033; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000f91; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x006018000000001a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0060401900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x006018000000001a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0060401900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000006170; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000006170; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000002ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000002ff; ++ __m256i_out = __lasx_xvmsknz_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c +new file mode 100644 +index 000000000..8c8d4996b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c +@@ -0,0 +1,647 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x074132a240000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017f0000017d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000017f0000017f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002e0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002e0000fffe; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000002e0000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000002e0000002e; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000002e0000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f7bc0001f7bd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000f93b0000017c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000f7bc0001f7bd; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f93b0000017b; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1410141014101410; ++ *((unsigned long *)&__m256i_result[2]) = 0x1410141014101410; ++ *((unsigned long *)&__m256i_result[1]) = 0x1410141014101410; ++ *((unsigned long *)&__m256i_result[0]) = 0x1410141014101410; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000007fff01ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdb8e209d0cce025a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_result[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_result[1]) = 0x9a7f997fff01ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xbe632a4f1c3c5653; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x01010101010000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000004800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000004800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000004800000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000004800000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x7b7b7b7b80000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xcacacb1011040500; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x7b7b7b7b80000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xcacacb1011040500; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, 
__m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_result[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_result[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0xa020202020206431; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff80fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff80fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff80007ffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff007fff80fe; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff457db03f; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x001ffffe00200000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fe200000fe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000009e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000009e; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffff0078ffff0078; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffff0078ffff0078; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c +new file mode 100644 +index 000000000..58ad8bfcd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000007ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fdfcfda8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000e2821d20ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fdfcfda8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000e2821d20ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff8000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ff8000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc5c085372cfabfba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf259905a0c126604; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3aa1c82947; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f647000007d6; ++ *((unsigned long *)&__m256i_result[2]) = 0x031b358c021ee663; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000faaf0000f9f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x02b4fdadfa9704df; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7ffffffffffff1f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbffffffffffffeff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff8900000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff8900000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000aaaa00008bfe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000aaaa0000aaaa; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff5556aaaa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff5556aaaa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000007fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000036a37; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000007fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000004def9; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffe0001; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[0]) = 0x000408080c111414; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c +new file mode 100644 +index 000000000..85d24fe44 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c +@@ -0,0 +1,635 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe8bfe0efe8bfe12; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe8bfe0efe8bfe12; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000027; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40efffe09fa88260; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6b07ca8e013fbf01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40efffe09fa7e358; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80ce32be3e827f00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x86ff76ffff4eff42; ++ *((unsigned long *)&__m256i_op1[2]) = 0x86ffffffffff9eff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x86ff76ffff4effff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x86ff32ffaeffffa0; ++ *((unsigned long *)&__m256i_result[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_result[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ec0a1b2aba7ed0; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffc020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffc020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0f00204000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0c6a240000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0f00204000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x04a3000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x04a3000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff8000fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00017fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff8000fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00017fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007f00fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000fe0000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007f00fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000fe0000007f; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffe00000ffe00000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffe00000ffe00000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fafe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fafe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff01c000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000f1000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001341c4000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001000310000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000033e87ef1; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000002e2100; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000045f3fb; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_xvmuh_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_result[2]) = 0x3abac5447fffca89; ++ *((unsigned long *)&__m256i_result[1]) = 0x3aadec4f6c7975b1; ++ *((unsigned long *)&__m256i_result[0]) = 0x3abac5447fffca89; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000010000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000010000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c +new file mode 100644 +index 000000000..be3c8e718 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffd1b24e00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffc1278fffce4c8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0e2d5626ff75cdbc; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5db4b156e2002a78; ++ *((unsigned long *)&__m256i_op1[1]) = 0xeeffbeb03ba3e6b0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0c16e25eb28d27ea; ++ *((unsigned long *)&__m256i_result[3]) = 0xf96d674800000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long *)&__m256i_result[1]) = 0x14187a7822b653c0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfbe0b866962b96d0; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffff01ffffff08; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43700f0100003008; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffff01ffffff08; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43700f0100003008; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000f8; ++ *((unsigned long *)&__m256i_result[2]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000f8; ++ *((unsigned long *)&__m256i_result[0]) = 0xbc8ff0ffffffcff8; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x353bb67af686ad9b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x353bb67af686ad9b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0200000200000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0200000200000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1cfd000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000180000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffd2; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff8000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000080000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000627; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000627; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffd5a98; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffd5a98; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007f3a40; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x120e120dedf1edf2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x120e120dedf1edf2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010000010100000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010000010100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010000010100000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010000010100000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fff00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0040000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff00000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbc30c40108a45423; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbc263e0e5d00e69f; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbc30c40108a4544b; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbc20e63aa8b9663f; ++ *((unsigned long *)&__m256i_result[3]) = 0x71860bf35f0f9d81; ++ *((unsigned long *)&__m256i_result[2]) = 0x720ed94a46f449ed; ++ *((unsigned long *)&__m256i_result[1]) = 0x71860bf35f0f9f39; ++ *((unsigned long *)&__m256i_result[0]) = 0x72544f0e6e95cecd; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x111ebb784f9c4100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c386546809f3b50; ++ *((unsigned long *)&__m256i_op0[1]) = 0x111ebb784f9bf1ac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x21f6050d955d3f68; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbab0c4b000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xaa0ac09800000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00bf00bf00bf00bf; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00bf00bf00bf00bf; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00bf00bf00bf00bf; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00bf00bf00bf00bf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000088; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000088; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc0008000c0008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f8000000000008; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800f800000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f8000000000008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800f800000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe3f7fff7fffcbd08; ++ *((unsigned long *)&__m256i_result[2]) = 0x0dbfa28000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xe3f7fff7fffcbd08; ++ *((unsigned long *)&__m256i_result[0]) = 0x0dbfa28000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_result[1]) = 0x7070545438381c1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7070545438381c1c; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c +new file mode 100644 +index 000000000..01ff71649 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c +@@ -0,0 +1,590 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf96d674800000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x44a4330e2c7116c0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x14187a7822b653c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfbe0b866962b96d0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffd1b24e00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffc1278fffce4c8; ++ *((unsigned long *)&__m256i_result[3]) = 0xebfd15f000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01700498ff8f1600; ++ *((unsigned long *)&__m256i_result[1]) = 0xf520c7c024221300; ++ *((unsigned long *)&__m256i_result[0]) = 0x00802fd0ff540a80; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001dc; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ee; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffce; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fc7c; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffce; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fc7c; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff8080000004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff8080000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00001ff800000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd8d8c00000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001ff800000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd8d8c00000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3f80000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3f80000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff8ffffff08; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00f800ffcff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff8ffffff08; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00f800ffcff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_result[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_result[0]) = 0x0045b8ae81bce1d8; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xffffffff00ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfd12fd12fd12fd12; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000060000108; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001060005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fef0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000017bfffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000180007fe8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000062d4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c +new file mode 100644 +index 000000000..32088f4ae +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c +@@ -0,0 +1,590 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4e5cba76cdbaaa78; ++ *((unsigned long *)&__m256i_op0[2]) = 0xce68fdeb4e33eaff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4e45cc2dcda41b30; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4ccb1e5c4d6b21e4; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x44bb2cd3a35c2fd0; ++ *((unsigned long *)&__m256i_result[0]) = 0xca355ba46a95e31c; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202000002020202; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202000002010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202000002020202; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202000002020000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe000000ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe000001fe0000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff01ff68; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000070ff017de6; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff01ff68; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000070ff017de6; ++ *((unsigned long *)&__m256i_op1[3]) = 0x761ed60b5d7f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdc9938afafe904f1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x761ed60b5d7f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdc9938afafe904f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00004c9000e9d886; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00004c9000e9d886; ++ __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6651bfff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe0001c3fe4001; ++ *((unsigned long *)&__m256i_result[0]) = 0x8ffe800100000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff010000fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff19; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff02ff020001fffa; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000100010001fffa; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe01ff0006ffcf; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000e62f8f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe02fe0006ffd6; ++ *((unsigned long *)&__m256i_result[0]) = 
0x000000000006ffd6; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01010101010000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100feff0100eeef; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100feff00feef11; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001010; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x201fdfe0201fdfe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x201fdfe0201fdfe0; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff47b4ffff5878; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000b84b0000a787; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff47b4ffff5878; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000b84b0000a787; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010100000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010100000101; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff1b00e4; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0807f7f80807f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0807f7f80807f7f8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0807f7f80807f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0807f7f80807f7f8; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x000000000000004e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000000a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000a; ++ __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007f000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0408040800008002; ++ *((unsigned long *)&__m256i_result[0]) = 0xfbf7fbf7ffff7ffd; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x04080c1014182d35; ++ *((unsigned long *)&__m256i_result[2]) = 0x716d696573765161; ++ *((unsigned long *)&__m256i_result[1]) = 0x04080c1014182d35; ++ *((unsigned long *)&__m256i_result[0]) = 0x716d696573765161; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c +new file mode 100644 +index 000000000..19157f682 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c +@@ -0,0 +1,605 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01fe02; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01fe02; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff01c000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000f1000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01fe04; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01fe04; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000022ffdd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000022ffdd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000f4b6ff23; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000f4b6ff23; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f20; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000009f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op1[3]) = 0x417e01f040800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x299d060000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x417e01f040800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x29108b0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707b7cff8f84830; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000354ad4c28; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707b7cff8f84830; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000354ad4c28; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ef; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000016e00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000016e00; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000155b200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000b70000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000008b; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff010000008b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op1[2]) = 0x03acfc5303260e80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op1[0]) = 0x03acfc5303260e80; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_result[2]) = 0x000f9bb562f56c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000b0cfffff4f3; ++ *((unsigned long *)&__m256i_result[0]) = 0x000f9bb562f56c80; ++ __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3ff1808001020101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000ff7f1080ef8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000ff7f1080ef8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x003ff18080010201; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x003ff18080010201; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfbba01c0003f7e3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfbd884e7003f7e3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe367cc82f8989a; ++ *((unsigned long *)&__m256i_result[2]) = 0x4f90000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc3aaa8d58f43c8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020000000200001; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x101010100000000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff01feffff01ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff01feffff01ff; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5fa0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c +new file mode 100644 +index 000000000..80fdcda63 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7a7cad6eca32ccc1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7a7cad6efe69abd1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7a7cad6eca32ccc1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7a7cad6efe69abd1; ++ *((unsigned long *)&__m256i_result[3]) = 0xff86005300360034; ++ *((unsigned long *)&__m256i_result[2]) = 0xff86005300020055; ++ *((unsigned long *)&__m256i_result[1]) = 0xff86005300360034; ++ 
*((unsigned long *)&__m256i_result[0]) = 0xff86005300020055; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2c27000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2c27000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f3a40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f3a40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000d24; ++ __m256i_out = 
__lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000073333333; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010080; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000015d050192cb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x028e509508b16ee9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000033ff01020e23; ++ *((unsigned long *)&__m256i_op0[0]) = 0x151196b58fd1114d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff000000ffffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffaff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffd7200fffff74f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000702f; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04080408fff87803; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0707b7cff8f84830; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000354ad4c28; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0707b7cff8f84830; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000354ad4c28; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffd5a98; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007e8092; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007e8092; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe07de080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000001f20607a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe07de080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000001f20607a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00153f1594ea02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff15c1ea95ea02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe7ffffffeffffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe7ffffffeffffc0; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000017fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000017fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c +new file mode 100644 +index 000000000..1a4b221fe +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c +@@ -0,0 +1,470 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01480000052801a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffdcff64; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbea2e127c046721f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1729c073816edebe; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0xde91f010000006f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00170000028500de; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fd02f20d; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4ffc3f7800000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3fc03f6400000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4ffc3f7800000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3fc03f6400000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x4eb13ec100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3ec13ec100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4eb13ec100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3ec13ec100000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fc03fc0; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc039000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x04f104f104f104f1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x04f104f104f104f1; ++ __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ff8010000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ff8010000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f800f800f800f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0018181800181818; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f800f800f800f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0018181800181818; ++ *((unsigned long *)&__m256i_result[3]) = 0x001f1f3e3e1f1f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0003060909060300; ++ *((unsigned long *)&__m256i_result[1]) = 0x001f1f3e3e1f1f00; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0003060909060300; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x90007fff90008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0ffffffe90008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4800408ef07f7f01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0800000eeffffe02; ++ __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fc03e000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fc03e000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00fffb0402fddf20; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00fffb0402fddf20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001fbf9fbe29f52; ++ *((unsigned long *)&__m256i_result[2]) = 0x5b409c0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001fbf9fbe29f52; ++ *((unsigned long *)&__m256i_result[0]) = 0x5b409c0000000000; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000043efffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000043efffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfffa004fffd8000; ++ __m256i_out = 
__lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffec75c2d209f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff03fe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffec75c2d209f; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffd0003; ++ __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c +new file mode 100644 +index 000000000..9fcd3ce0c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c +@@ -0,0 +1,440 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f2c678e38d1104c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf01010153a10101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f01ff5b7f10ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017f00007f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007f0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff810000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x71860bf35f0f9d81; ++ *((unsigned long *)&__m256i_op0[2]) = 0x720ed94a46f449ed; ++ *((unsigned long *)&__m256i_op0[1]) = 0x71860bf35f0f9f39; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x72544f0e6e95cecd; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff8910ffff7e01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff3573ffff8960; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff8910ffff1ca9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff5e5ffff8130; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffcb423a587053; ++ *((unsigned long *)&__m256i_result[2]) = 0x6d46f43e71141b81; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffcb423a584528; ++ *((unsigned long *)&__m256i_result[0]) = 0x9bdf36c8d78158a1; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x800000007fff0001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff7f0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xbfffffffffff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfff800080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xbfffffffffff8000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfff800080000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000007f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000002de; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000002de; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000007f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffff808; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000007f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffff808; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c +new file mode 100644 +index 000000000..3cd1626d4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c +@@ -0,0 +1,526 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x002e4db200000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000315ac0000d658; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00735278007cf94c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0003ed8800031b38; ++ *((unsigned long *)&__m256i_result[3]) = 0xffd1b24e00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcea54ffff29a8; ++ *((unsigned long *)&__m256i_result[1]) = 0xff8cad88ff8306b4; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffc1278fffce4c8; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x06f880008000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000010180000101; ++ *((unsigned long *)&__m256i_result[2]) = 0xfa08800080000101; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080008000480f; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010201010204; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010102; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007380; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000f1c00; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000800000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0081000100810001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0081000100810001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0081000100810001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0081000100810001; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x223d76f09f3881ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3870ca8d013e76a0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x223d76f09f37e357; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ec0a1b2aba7ed0; ++ *((unsigned long *)&__m256i_result[3]) = 0xdec38a1061c87f01; ++ *((unsigned long *)&__m256i_result[2]) = 0xc8903673ffc28a60; ++ *((unsigned long *)&__m256i_result[1]) = 0xdec38a1061c91da9; ++ *((unsigned long *)&__m256i_result[0]) = 0xbd14f6e5d6468230; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fdda7dc4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000007e8080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fdda7dc4; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff827f80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0226823c; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff827f80; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0226823c; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000180000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000180000001; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000008000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000008000165a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff00017fff005d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffe9a6; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffff00017fff005d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffe9a6; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff0100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff0100000001; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100004300000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100004300000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff0000bd00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff0000bd00000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff00fff8ffc0; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000497fe0000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000683fe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000497fe0000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000683fe0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff97c120000000; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefdfdfdfd; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefdfdfdfd; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010201010102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010202020203; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010201010102; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000032; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000032; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffce; ++ __m256i_out = __lasx_xvneg_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvneg_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fe000007fe0; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_result[1]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000812000008120; ++ __m256i_out = __lasx_xvneg_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffd880; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffd880; ++ __m256i_out = __lasx_xvneg_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c +new file mode 100644 +index 000000000..3a491ecab +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c +@@ -0,0 +1,170 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xbf28b0686066be60; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffff6ff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffff6ff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000900ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x00000900ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8888888808888888; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8888888808888888; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0888888888888888; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x77777777f7777777; ++ *((unsigned long *)&__m256i_result[2]) = 0xf777777777777777; ++ *((unsigned long *)&__m256i_result[1]) = 0x77777777f7777777; ++ *((unsigned long *)&__m256i_result[0]) = 0xf777777777777777; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x40ff40ff40ff40ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x407b40ff40ff40f1; ++ *((unsigned long *)&__m256i_result[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_result[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_result[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_result[0]) = 0xbf84bf00bf00bf0e; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffbdff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xa000a0009f80ffcc; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffbdff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) 
= 0xa000a0009f80ffcc; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[3]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long *)&__m256i_result[2]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long *)&__m256i_result[1]) = 0x6f6f6f6f6f6f6f6f; ++ *((unsigned long *)&__m256i_result[0]) = 0x6f6f6f6f6f6f6f6f; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000300030000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffc000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000300030000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffc000; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x800fffffffffffff; ++ __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c +new file mode 100644 +index 000000000..995a34c18 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c +@@ -0,0 +1,152 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007773; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003373; ++ *((unsigned long *)&__m256i_result[3]) = 0xbbbbbbbbbbbbbbbb; ++ *((unsigned long *)&__m256i_result[2]) = 0xbbbbbbbbbbbb8888; ++ *((unsigned long *)&__m256i_result[1]) = 0xbbbbbbbbbbbbbbbb; ++ *((unsigned long *)&__m256i_result[0]) = 0xbbbbbbbbbbbb8888; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x44); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_result[2]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_result[1]) = 0xdededededededede; ++ *((unsigned long *)&__m256i_result[0]) = 0xdededededededede; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[1]) = 0x9090909090909090; ++ *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x6f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xf7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5858585858585858; ++ *((unsigned long *)&__m256i_result[2]) = 0x5858585858585858; ++ *((unsigned long *)&__m256i_result[1]) = 0x5858585858585858; ++ *((unsigned long *)&__m256i_result[0]) = 0x5858585858585858; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xa7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xc2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x9d9d9d9d9d9d9d8d; ++ *((unsigned long *)&__m256i_result[2]) = 0x9d9d9d9d9d9d9d9d; ++ *((unsigned long *)&__m256i_result[1]) = 0x9d9d9d9d9d9d9d8d; ++ *((unsigned long *)&__m256i_result[0]) = 0x9d9d9d9d9d9d9d9d; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 
0x62); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[1]) = 0x2a2a2a2a2a2a2a2a; ++ *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2a2a2a2a2a; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xd5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_result[3]) = 0xe9e968c9e9e968c1; ++ *((unsigned long *)&__m256i_result[2]) = 0xe9e968c9e9e968c9; ++ *((unsigned long *)&__m256i_result[1]) = 0xe9e968c9e9e968c1; ++ *((unsigned long *)&__m256i_result[0]) = 0xe9e968c9e9e968c9; ++ __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c +new file mode 100644 +index 000000000..27eef710d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c +@@ -0,0 +1,215 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000005e02; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001e0007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fe37fff001fffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fe37fff001fffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fffffff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x003f60041f636003; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1fff1fff1fff1; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080ff800080ff; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff80007fff0000; ++ __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c +new file mode 100644 +index 000000000..ee91af95f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c +@@ -0,0 +1,141 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int 
int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_result[2]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_result[1]) = 0x6c6c6c6c6c6c6c6c; ++ *((unsigned long *)&__m256i_result[0]) = 0x6c6c6c6c6c6c6c6c; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x9f9f9f9f9f9f9f9f; ++ *((unsigned long *)&__m256i_result[2]) = 0x9f9f9f9fffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x9f9f9f9f9f9f9f9f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff9fffffffff; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x9f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff7effffff46; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff7effffff46; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x42); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0xbf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x2c2c2c2c2c2c2c2c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x2c2c2c2c2c2c2c2c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_result[2]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_result[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_result[0]) = 0x5252525252525252; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x52); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fe363637fe36363; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fe363637fe36363; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x63); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefeffe0e0e0; ++ *((unsigned long *)&__m256i_result[1]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefeffe0e0e0; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0xe0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_result[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_result[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_result[0]) = 0x6b6b6b6b6b6b6b6b; ++ __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c +new file mode 100644 +index 000000000..fa6cdff31 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c +@@ -0,0 +1,245 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x40d74f979f99419f; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01480000052801a2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffdcff64; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffdaaaaffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000022; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffddffdeffb5ff8d; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffdfffffffdffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffddffdeffb5ff8d; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00feffff00fe81; ++ *((unsigned long *)&__m256i_result[2]) = 0xfe01fe51ff00ff40; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00feffff00fe81; ++ *((unsigned long *)&__m256i_result[0]) = 0xfe01fe51ff00ff40; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe0df9f8f; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0df9f8f; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff7fffffff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff7fffffff7fff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdf80df80df80dfff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffdf80dfff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498100814843ffe1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4981008168410001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498100814843ffe1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4981008168410001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff896099cbdbfff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xc987ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff896099cbdbfff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xc987ffffffffffff; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffeffff97a1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffdf5b000041b0; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_result[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_result[0]) = 0x000020a4ffffbe4f; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0xffbffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_result[1]) = 0xffbffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffa; ++ __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c +new file mode 100644 +index 000000000..33b96d657 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c +@@ -0,0 
+1,501 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x81f7f2599f0509c2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x51136d3c78388916; ++ *((unsigned long *)&__m256i_op1[3]) = 0x044819410d87e69a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x21d3905ae3e93be0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5125883a30da0f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6d7b2d3ac2777aeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x000019410000e69a; ++ *((unsigned long *)&__m256i_result[2]) = 0xf259905a09c23be0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000883a00000f20; ++ *((unsigned long *)&__m256i_result[0]) = 0x6d3c2d3a89167aeb; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4f7fffbf0000fe00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000004f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4f7fffe64f7fffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffbf0000fe000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fe020000fe22; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe6fe42ffc00000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00550000ffab0001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00550000ffab0001; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000400000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000000; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe00000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffffffffff; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080010000800100; ++ *((unsigned long *)&__m256i_result[2]) = 0x00c0000000c00000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080010000800100; ++ *((unsigned long *)&__m256i_result[0]) = 0x00c0000000c00000; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000001fdfffffe02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff01fefffeff02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fd00ffff02ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffee0000004c0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff050000ff3c0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f9000000780000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffa80000ff310000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001d0000001d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001d0000001d00; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x6); ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff890000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff790000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff890000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff790000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff790000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff790000; ++ __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfbff0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe700000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe7007b007e; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe700000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe7007b007e; ++ __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0003fffc0803fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc0803fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fffc0000fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffc0000fff8; ++ __m256i_out = 
__lasx_xvpackev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c +new file mode 100644 +index 000000000..cdd20e881 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c +@@ -0,0 +1,575 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f057f0b7f5b007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7fff7fff7fff00; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff0fff005f0f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xff00ff0fff005f0f; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff000607f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010017e7d1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff000607f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001001807f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002555500000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000005400; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000005400; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007fff8000ffff0; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ff00; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f0000000f000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000022be22be; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000022be22be; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fffa2bea2be; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff10000fff10000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000008; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff0000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000555500005555; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000555500005555; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000555500005555; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000555500005555; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000fff80000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fff80000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fff80000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff80000; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff00007fff; ++ __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c +new file mode 100644 +index 000000000..d2e742e81 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c +@@ -0,0 +1,526 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x639c3fffb5dffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb8c7800094400001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000e000c000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0009000100040001; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004000400040805; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000400040805; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004000400040805; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400040805; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008000800000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0806050008060500; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000800000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000040002; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x34000000fff00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff6e00000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3380000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x363c0000fff3c000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000030000000c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000500000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000010; ++ __m256i_out = __lasx_xvpcnt_w 
(__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00c100c100c100c1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00c100c100c100c1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080800000808; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0808080808080808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000100000001; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001555; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000015554001c003; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000001555; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000015554001c003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000304; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000030401010202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000304; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000030401010202; ++ __m256i_out = __lasx_xvpcnt_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000a0008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000a0008; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000030000; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000040000001b; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000040000001b; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000b000b000b000b; ++ __m256i_out = __lasx_xvpcnt_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001f00000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001f00000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001200000012; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvpcnt_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c +new file mode 100644 +index 000000000..66faa74d0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffe81; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe81; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001341c4000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001000310000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000033e87ef1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000002e2100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000011c00; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000e8f1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000103100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000002e00; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000004290; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004290; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000004290; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004290; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202031; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202031; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0xd0d8eecf383fdf0d; ++ __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x80208020c22080a7; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x80208020c22080a7; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xe07de0801f20607a; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000800080010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000800080010000; ++ __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff9fffffff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ffffd8020010001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff9fffffff9; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000060002000a; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000060002000a; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c +new file mode 100644 +index 000000000..a9778809f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, 
__m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003f8040002f607; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002728b00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003f8040002f607; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffff328dfff; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00200020ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e0000001e000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00200020ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e0000001e000000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00800080ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00800080ffffffff; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000040004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xb70012c4b714fc1e; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff017e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fe02b71c199d; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x017e017e00ff017e; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0049ffd2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01620133004b0032; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_result[3]) = 0xc080ffff0049ffd2; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffeffb9ff9d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00010000002fff9e; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbabababababababa; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8787878a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3fc6c68787; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f87870000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fff; ++ __m256i_out = __lasx_xvpickod_h 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000002467db99; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003e143852; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000002467db99; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003e143852; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7bbbbbbbf7777778; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000246700003e14; ++ *((unsigned long *)&__m256i_result[2]) = 0x000044447bbbf777; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000246700003e14; ++ *((unsigned long *)&__m256i_result[0]) = 0x000044447bbbf777; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0006000000020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0006000000020000; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xbff00000bff00000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xbff00000bff00000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87f7f7f807f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87f7f7f807f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffe98; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe98; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c +new file mode 100644 +index 000000000..a2edbb80a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c +@@ -0,0 +1,130 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x010180068080fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000c40086; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff820002ff820002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff820002ff820002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c +new file mode 100644 +index 000000000..8bd3a8273 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c +@@ -0,0 +1,388 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0cc08723ff900001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xcc9b89f2f6cef440; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ int_result = 0x000000000000ffff; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000ffff00000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ int_result = 0x000000000000007f; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefdfffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000fffffefd; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5555555580000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5555555580000000; ++ int_result = 0x0000000055555555; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002000400000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020006; ++ unsigned_int_result = 0x0000000000020006; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000; ++ long_int_result = 0x1f0fdf7f3e3b31d4; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00fe01fc01fe01fc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x012c002c001c0006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00fe01fc01fe0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x012c002c001c000a; ++ long_int_result = 0xfe01fc01fe0000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ unsigned_long_int_result = 0x00000000ffffffff; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_out, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ long_int_result = 0x00000000ffff0100; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ int_result = 0x000000007ff00000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, 
int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ unsigned_long_int_result = 0x00000000ffffffff; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0xffffffffffffffff; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff0100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff0100000001; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0008; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ long_int_result = 0x000000000000ffff; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010; ++ unsigned_int_result = 0x0000000000100010; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100040; ++ unsigned_int_result = 0x0000000000000040; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ 
ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ long_int_result = 0xffffffffffffffff; ++ long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000ffffffff; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000ffffffff; ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffd880; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffd880; ++ int_result = 0x0000000000000000; ++ int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c +new file mode 100644 +index 000000000..9346f9bfb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c +@@ -0,0 +1,20 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c +new file mode 100644 +index 000000000..9346f9bfb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c +@@ -0,0 +1,20 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c +new file mode 100644 +index 000000000..81456bc1b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c +@@ -0,0 +1,380 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ int_op0 = 0x0000001b3c4c0a5c; ++ *((unsigned long *)&__m256i_result[3]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long *)&__m256i_result[2]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long *)&__m256i_result[1]) = 0x3c4c0a5c3c4c0a5c; ++ *((unsigned long *)&__m256i_result[0]) = 0x3c4c0a5c3c4c0a5c; ++ __m256i_out = 
__lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000397541c58; ++ *((unsigned long *)&__m256i_result[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[0]) = 0x97541c5897541c58; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000084; ++ *((unsigned long *)&__m256i_result[3]) = 0x0084008400840084; ++ *((unsigned long *)&__m256i_result[2]) = 0x0084008400840084; ++ *((unsigned long *)&__m256i_result[1]) = 0x0084008400840084; ++ *((unsigned long *)&__m256i_result[0]) = 0x0084008400840084; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000020202020; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 
0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ long_op0 = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006; ++ __m256i_out = __lasx_xvreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c +new file mode 100644 +index 000000000..7aa76c2ba +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c +@@ -0,0 +1,536 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000001b3c4c0a5c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ int_op1 = 0x0000000059815d00; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ int_op1 = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000012e2110; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ int_op1 = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x003f003f003f003f; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f003f; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f; 
++ *((unsigned long *)&__m256i_result[2]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x003f003f003f003f; ++ __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003f0000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_result[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_result[1]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_result[0]) = 0xe161616161614e60; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ int_op1 = 0x00000000000000ac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00d5007f00ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffff7fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffff7fffff; ++ int_op1 = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc192181230000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff00ff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvreplve_d 
(__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fef7fef7fef7fef; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00ffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f010700c70106; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f010700c70106; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_result[2]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_result[1]) = 0x0106010601060106; ++ *((unsigned long *)&__m256i_result[0]) = 0x0106010601060106; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003fff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000404; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[0]) = 0x0404040404040404; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000202; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000202; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000003ddc5dac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c +new file mode 100644 +index 000000000..a2bc2da52 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c +@@ -0,0 +1,471 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffffff88; ++ *((unsigned long *)&__m256i_op0[2]) = 0x61e0000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffffff88; ++ *((unsigned long *)&__m256i_op0[0]) = 0x61e0000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff80fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xd52aaaaa555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff80fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xd52aaaaa555555ab; ++ *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fff; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000001; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0007fd00000f02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfc00ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfc00ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00fe00fe00fe00fe; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000064; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[3]) = 0x047a047a047a047a; ++ *((unsigned long *)&__m256i_result[2]) = 0x047a047a047a047a; ++ *((unsigned long *)&__m256i_result[1]) = 0x047a047a047a047a; ++ *((unsigned long *)&__m256i_result[0]) = 0x047a047a047a047a; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x037fe01f001fe020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x037fe01f001fe020; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; 
++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvreplve0_d (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x800080ff800080ff; ++ __m256i_out = __lasx_xvreplve0_w (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvreplve0_q (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_result[3]) = 0x97a297a297a297a2; ++ *((unsigned long *)&__m256i_result[2]) = 0x97a297a297a297a2; ++ *((unsigned long *)&__m256i_result[1]) = 0x97a297a297a297a2; ++ *((unsigned long *)&__m256i_result[0]) = 0x97a297a297a297a2; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_h (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvreplve0_b (__m256i_op0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c +new file mode 100644 +index 000000000..9346f9bfb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c +@@ -0,0 +1,20 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c +new file mode 100644 +index 000000000..21446e55e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, 
long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0001ff02; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff020afefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000003fefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffefff7fff7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7ffffffbfffb; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0001ff02; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff020afefc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000003fefd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0001ff04; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff02a0fefc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000cfefd; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff010000fff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff19; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff02ff020001fffa; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000100010001fffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x807f807f00000380; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007380; ++ *((unsigned long *)&__m256i_result[1]) = 0xc03fc03f000001c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001c0; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffe40; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000fedd; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fedd; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000fedd; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000fedd; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x805f0000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x80be0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x80be0000ffffffff; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff457d607f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000457d607d; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff457d607f; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0ffffffe0ffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ffffffe0ffffffe; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_result[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_result[0]) = 0x001fc0200060047a; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ca0000fff80000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ca0000fff80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x381800007af80000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x381800007af80000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000086fe0000403e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000403e00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001bfa000000f9; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000f900004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001bfa000000f9; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000f900004040; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0607ffff0607; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000faf3f3f2; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0607ffff0383; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0607ffffc0c1; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0607ffff0383; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0607ffffc0c1; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c79; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c79; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007f8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007f8000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffdfff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffdfff80; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c +new file mode 100644 +index 000000000..c1b8e1752 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c +@@ -0,0 +1,394 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbea2e127c046721f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1729c073816edebe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xde91f010000006f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ef1f90efefaf30d; ++ *((unsigned long *)&__m256i_result[3]) = 0x515f93f023600fb9; ++ *((unsigned long *)&__m256i_result[2]) = 0x948b39e0b7405f6f; ++ *((unsigned long *)&__m256i_result[1]) = 0x48ef087800007c83; ++ *((unsigned long *)&__m256i_result[0]) = 0x78af877c7d7f86f9; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7fff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f007f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7fff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfff807f; ++ *((unsigned long *)&__m256i_result[1]) = 0xbf803fbfbfbfbfbf; ++ *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfff807f; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002a54290; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000007f0000007f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007f0000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff80ff01ff80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff800000007e; ++ *((unsigned long *)&__m256i_result[3]) = 0x003f8000003f8000; ++ *((unsigned long *)&__m256i_result[2]) = 0x003f8000003f8000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc07f80ffc07f80; ++ *((unsigned long *)&__m256i_result[0]) = 0xffc07f80003f0000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x24); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d 
(__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x4343434343434343; ++ *((unsigned long *)&__m256i_result[2]) = 0x4343434343434343; ++ *((unsigned long *)&__m256i_result[1]) = 0x4343434343434343; ++ *((unsigned long *)&__m256i_result[0]) = 0x4343434343434343; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x38); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffdffd; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffdffd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffdffd; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffdffd; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000f0000000f000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000f0000000f000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000007fc00000400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000040000000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x000007fc00000400; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000040000000400; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00f7000000f70006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00f7000000f70006; ++ __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffbfffffffb; ++ __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c +new file mode 100644 +index 000000000..2a4f29b50 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op1[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_op1[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_op1[0]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0xa020202020206431; ++ *((unsigned long *)&__m256i_result[1]) = 0xa020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0xa020202020206431; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffa80000ff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffee0000ff4c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff050000ff3c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fff90000ff78; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffa80000ff31; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff810011; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000200000008; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000200000008; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7efefefe80ffffff; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007fde00007fd4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fe000007fe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff7edfffff7edf; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff7eddffff7ed3; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff7edfffff7edf; ++ __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffc81aca; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003a0a9512; ++ *((unsigned long *)&__m256i_op0[1]) = 0x280ac9da313863f4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe032c739adcc6bbd; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffdffffffc81aca; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff3a0b9512; ++ *((unsigned long *)&__m256i_result[1]) = 0x280bc9db313a63f5; ++ *((unsigned long *)&__m256i_result[0]) = 0xe032c738adcb6bbb; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x1fe01e0100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x00000000fffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffa; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf800d0d8ffffeecf; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000383fffffdf0d; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf000f000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf000f000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xe800c0d8fffeeece; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff383efffedf0c; ++ *((unsigned long *)&__m256i_result[1]) = 0xe800c0d8fffeeece; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe0000fffe0002; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe0000fffe0002; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[0]) = 0x7575757575757575; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fc00fc00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fc00fc00; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fc00fc00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fc00fc00; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007b00f9007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000007b00f9007e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000007b00f9007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007b00f9007e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000f601f200fc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000f601f200fc; ++ __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000007f00000022; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000022; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[0]) = 0x1c1b1a191c1b1a19; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffb5ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002ff80ffb70000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffb7ff80ffd0ffd8; ++ *((unsigned long *)&__m256i_result[1]) = 0x00010000002fff9e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffb5ff80ffd0ffd8; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x00000000807e7ffe; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[0]) = 0xc2c2c2c2c2c2c2c2; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000006040190d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000006040190d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c +new file mode 100644 +index 000000000..a3afc9811 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001700080; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43e019c657c7d050; ++ *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050; ++ *((unsigned long *)&__m256i_result[3]) = 0x86ff76ffff4eff42; ++ *((unsigned long *)&__m256i_result[2]) = 0x86ffffffffff9eff; ++ *((unsigned long *)&__m256i_result[1]) = 0x86ff76ffff4effff; ++ *((unsigned long *)&__m256i_result[0]) = 0x86ff32ffaeffffa0; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff8910ffff7e01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff3573ffff8960; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff8910ffff1ca9; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff5e5ffff8130; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff8910ffff7e01; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff3573ffff8960; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff8910ffff1ca9; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff5e5ffff8130; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000f90; ++ __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffe200000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fffe00008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffe200000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[3]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_result[2]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_result[1]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_result[0]) = 0x7575ffff7575f575; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000f0f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000f0f0; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000003c01ff9; ++ __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000080; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c +new file mode 100644 +index 000000000..b4ac50271 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c +@@ -0,0 +1,537 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000800080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc9d8080067f50020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc70000020000c000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xf000f00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000f000f0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf0f008000ff5000f; ++ *((unsigned long *)&__m256i_result[0]) = 0xf00000020000f000; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000e000e000e000e; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003fea00013fec; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe50001c013; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fea00013fec; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe50001c013; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff0000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff0000ff00; ++ __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000399400003994; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000fff00000fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000fff00000fff; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffc0ffc0ffc0ffc0; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00002df900001700; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffe05ffffe911; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00002df900001700; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffe05ffffe911; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffcfffffffc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffcfffffffc; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffdd97dc4; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffdd97dc4; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = 
__lasx_xvsat_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f007f007f; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000003fffff; ++ __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000200000022; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0049004200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000022; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffefd; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_result[3]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x007fffff007fffff; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000080000001000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000080000001000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_result[3]) = 0x000007ff000007ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000007fffffff800; ++ *((unsigned long *)&__m256i_result[1]) = 0x000007ff000007ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000007fffffff800; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffc00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffc00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffc00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffc00000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff605a; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_result[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff8d9ffa7103d; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe000000000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001ffffffff; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fffffffffffffff; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000002c21ffeff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc0000000c0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000002c21ffeff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc0000000c0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsat_d (__m256i_op0, 0x32); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c +new file mode 100644 +index 000000000..e5ee89deb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c +@@ -0,0 +1,427 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_result[2]) = 0x3f3f3f3f3f3f3f3f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3f; ++ *((unsigned long *)&__m256i_result[0]) = 0x3f3f3f3f00000000; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000017f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000017f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f00000000000000; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++
*((unsigned long *)&__m256i_op0[3]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00071f1f00071f1f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x00071f1f00071f1f; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x001f001f001f001f; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003f003f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000003f003f; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc0090000c0200060; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc0090000c0200060; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f0060; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op0[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op0[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0003000300030000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003000300030000; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000203ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff03ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000203ff; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffcfffc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffcfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003fff; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fffffff0fffffff; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffff08; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fffffff0fffffff; ++ __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000003ffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000003ffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000003ffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000003ffffffffff; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x29); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x419cd5b11c3c5654; ++ *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x419cd5b11c3c5654; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000001ff; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007fffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007fffff; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f03030000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f03030000; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x37); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c +new file mode 100644 +index 000000000..2a42386ce +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffe000000000000; ++ *((unsigned
long *)&__m256i_op0[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000095120000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc9da000063f50000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc7387fff6bbfffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ffff00ff000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000080000000800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00fffffff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000005be55bd2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010003; 
++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010003; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ff00ff00; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffee0000004c0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff050000ff3c0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00f9000000780000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffa80000ff310000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff000000ff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff000000ff000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001005500020000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000005500000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001005500020000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffefff7f00100080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffefff7f00100080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffffffff; ++ __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000501ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000701ffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000501ffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000701ffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000260a378; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000d02317; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000260a378; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000d02317; ++ *((unsigned long *)&__m256i_op1[3]) = 0x003f020001400200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x003f00ff003f00c4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x003f020001400200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x003f00ff003f00c4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c +new file mode 100644 +index 000000000..5478d19c1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c +@@ -0,0 +1,449 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffdfe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffdfe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, 4); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00197d3200197d56; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00197d3200197d56; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_h (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000bdfef907bc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000bdfef907bc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_b (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_d (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x800fffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvseqi_w (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c +new file mode 100644 +index 000000000..c8a00ca89 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c +@@ -0,0 +1,430 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007ffffffff7ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x49d8080067f4f81f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f00fffff7ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xd8490849f467f867; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xb7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xdb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x95); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xee); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x6f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007ffffffff7ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x49d8080067f4f81f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7ffff7ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x080008000800f81f; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xa8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_result[2]) = 0xc5c545c545c545c5; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_result[0]) = 0xc5c545c545c545c5; ++ __m256i_out = 
__lasx_xvshuf4i_h (__m256i_op0, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xf7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xa7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xdc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff8001ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff8001ffff8001; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x6e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x9f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002ffff00020002; ++ *((unsigned long *)&__m256i_result[2]) = 0x04f504f104f504f5; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002ffff00020002; ++ *((unsigned long *)&__m256i_result[0]) = 0x04f504f104f504f5; ++ __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x65); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e18000000000000; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xfe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x64); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xb0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000f9f900; ++ *((unsigned long *)&__m256i_op0[2]) = 0x79f9f9f900f9f9e0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f9f900; ++ *((unsigned long *)&__m256i_op0[0]) = 0x79f9f9f900f9f900; ++ *((unsigned long *)&__m256i_result[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0x79f9f9f900000000; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x97); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007aff7c00; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xfffffffffd017d00; ++ *((unsigned long *)&__m256i_result[3]) = 0x7aff7c0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfd017d0000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7aff7c0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfd017d0000000000; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xb3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[3]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[2]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[1]) = 0xc3f0c3f0c3f0c3f0; ++ *((unsigned long *)&__m256i_result[0]) = 0xc3f0c3f0c3f0c3f0; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xf4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_result[3]) = 0xff81ff7dffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_result[1]) = 0xff81ff7dffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff81ff7d; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000020ff790020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000020ff790020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xa5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010183f95466; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01010101d58efe94; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010183f95466; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x01010101d58efe94; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xa7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xd9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80be0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000f0f0002; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80be0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000f1002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x80000000ff800000; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xdb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009; ++ __m256i_out = 
__lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x36); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_result[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_result[0]) = 0x04f104f104f504ed; ++ __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c +new file mode 100644 +index 000000000..641ea2315 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c +@@ -0,0 +1,761 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000007070707; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0102040000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000020100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0703020000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe02fe02fee5fe22; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff49fe4200000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffffff8fc000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfafafafafafafafa; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fefefe; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000077fff; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x67eee33567eee435; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x67eee33567eee435; ++ *((unsigned long *)&__m256i_op2[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7575ffff75757595; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7575ffff7575f575; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op2[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op2[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op2[0]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800f800; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000ffff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff88ff88ff880000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff88ff88ff880000; ++ __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_op1[3]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[2]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[1]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op1[0]) = 0x98111cca98111cca; ++ *((unsigned long *)&__m256i_op2[3]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_op2[1]) = 0x000000010000ffe1; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000101001e18; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000101001e18; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80008000b3e8fef1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80008000802ea100; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op2[0]) = 0x00000000012e2110; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x012e2110012e2110; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000082a54290; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000028aa700; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000082a54290; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54287; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fc00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fc00000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fc00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fc00000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xdfffffffdfffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0xdfffffffdfffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000104000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000104000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0004000500040005; ++ *((unsigned long 
*)&__m256i_op2[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000500040005; ++ __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op1[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffffe01fe52; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff01ff02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffffe01fe52; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff01ff02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000080008001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000080008001; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_op2[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff800000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff800000ff; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0xfffeb8649d0d6250; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffeb8649d0d6250; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op2[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op2[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op2[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c +new file mode 100644 +index 000000000..2a6eee0fd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c +@@ -0,0 +1,665 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[2]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[1]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op0[0]) = 0x97541c5897541c58; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff605a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202810102020202; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fefe0000fefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff0000fefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fefe0000fefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff0000fefe; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000017547fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000017547fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x807e80fd80fe80fd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80938013800d8002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x807e80fd80fe0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80938013800d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000801380f380fe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000801380f300fb; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffd5a98; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000101ff01; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff80ff00ff80ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80ff00ff80ff01; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) 
= 0x00000000000000fd; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff000000010000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3880800037800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3901000039010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3880800037800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3901000039010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fc00000428a; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffeffee; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe0000fffe0012; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000001ffff; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80be0000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80be0000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff00000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_result[3]) = 0x2080208020802080; ++ *((unsigned long *)&__m256i_result[2]) = 0x203e208020802079; ++ *((unsigned long *)&__m256i_result[1]) = 0x2080208020802080; ++ *((unsigned long *)&__m256i_result[0]) = 0x203e208020802079; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000004e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffba8300004fc2; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffba8300004fc2; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x004100df00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00c000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x004100df00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00c000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc1d75053f0000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000104000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffa30000165a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000104000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc1d75053f0000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xbe21000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000505300000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xbe21000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000505300000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000001880310877e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000001880310877e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001faf19b60; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6c2905ae7c14c561; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001faf19b60; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6c2905ae7c14c561; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x94d7fb5200000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000180; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x00000000003fffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffc00040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffc00040; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffdbff980038ffaf; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffafffe80004fff1; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffdbff980038ffaf; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffafffe80004fff1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffc; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000fffd0003; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0002fffd; ++ __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c +new file mode 100644 +index 000000000..ed752df00 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c +@@ -0,0 +1,575 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000460086; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f0079; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f30028; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000df00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffffff00ff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff00ff00ffff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000ff00ff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ffffff00ffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffbfffa0ffffff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffbfffa0ffffff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ffff0000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffff0000ff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffee; 
++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffdfff80ffdfff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffdfff80ffdfff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffdfff80ffdfff80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffdfff80ffdfff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fef7fef7fef7fef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2aaaaa85aaaaaa85; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2aaa48f4aaaa48f4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2aaaaa85aaaaaa85; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2aaa48f4aaaa48f4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = 
__lasx_xvsle_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001a00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00197d3200197d56; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00197d3200197d56; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffe0001fffe0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ffe0001fffeffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fdfdfe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00f7000000f70007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00f7000000f70007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01fffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01fffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x800080ff800080ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000002d; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc02dc02dc02dc02d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000002d; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc02dc02dc02dc02d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c +new file mode 100644 +index 000000000..bc98b41af +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c +@@ -0,0 +1,590 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8011ffae800c000c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00baff050083ff3c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80b900b980380038; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0017ffa8008eff31; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000003ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x00000000000003ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000feb60000b7d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000feb60000c7eb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000feb60000b7d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000feb60000c7eb; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb683007ffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c0df5b41cf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb683007ffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c0df5b41cf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001497c98ea4fca; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001497c98ea4fca; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010201010204; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010102; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00020421d7d41124; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00020421d7d41124; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x94d7fb5200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c +new file mode 100644 +index 000000000..06717802c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000101ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00010013000100fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00010013000100fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_b (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x014200c200c200ae; ++ *((unsigned long *)&__m256i_op0[2]) = 0x014200c200c200ae; ++ *((unsigned long *)&__m256i_op0[1]) = 0x014200c200c200ae; ++ *((unsigned long *)&__m256i_op0[0]) = 0x014200c200c200ae; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffff8900000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffff8900000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvslei_h (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000460086; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f0079; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f30028; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000df00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfc2f3183ef7ffff7; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_w (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc6c6c6c68787878a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000003f3f3f3c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8787878a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_d (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c +new file mode 100644 +index 000000000..093d5640e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c +@@ -0,0 +1,438 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff00; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f3c611818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x032eafee29010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f3c611818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x032eafee29010000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000ffffff; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000f788f788; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00217f19ffde80e6; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00037f94fffc806b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00217f19ffde80e6; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00037f94fffc806b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 
0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x03802fc000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x03802fc000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c +new file mode 100644 +index 000000000..7179e715c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000fffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff2f7bcfff2f7bd; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff2f93bfff2fff2; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff2f7bcfff2f7bd; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff2f93bfff2fff2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcfffc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffcfffc; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x5980000000000000; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000800000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000001ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff8000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00010001000100; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x800000ff800000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff; ++ __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c +new file mode 100644 +index 000000000..003e29b67 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fa022a01a401e5; ++ *((unsigned long *)&__m256i_op0[2]) = 0x030d03aa0079029b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x024c01f901950261; ++ *((unsigned long *)&__m256i_op0[0]) = 0x008102c2008a029f; ++ *((unsigned long *)&__m256i_result[3]) = 0x54000000ca000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x5400000036000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf2000000c2000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x840000003e000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff1001100100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff1001100100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfcc4004400400000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0040400000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfcc4004400400000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0040400000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffef000004ea; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffefffffffef; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffef000004ea; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffefffffffef; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffbf4; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800e000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100010001000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1616161616161616; ++ *((unsigned long *)&__m256i_op0[2]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe16167f161616; ++ *((unsigned long *)&__m256i_op0[0]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x2c2c2c2c2c2c2c2c; ++ *((unsigned long *)&__m256i_result[2]) = 0x2c2c2c2cfefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefc2c2cfe2c2c2c; ++ *((unsigned long *)&__m256i_result[0]) = 0x2c2c2c2cfefefefe; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[2]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m256i_result[0]) = 0xf8f8f8f8f8f8f8f8; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1f60000000c00000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x60000000c0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x60000000c0000000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff80ff80ff80ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff80ff80ff80ff80; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00080008000801ff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00080008000801ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[0]) = 0xf0f0f0f0f0f0f0f0; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x03f0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x03f0000000000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff80000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff80000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800f800; ++ *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800f800; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0404000004040000; ++ __m256i_out = __lasx_xvslli_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000c040c0; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000c040c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff000000; ++ __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c +new file mode 100644 +index 000000000..ef3a47da5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c +@@ -0,0 +1,339 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe0ffe0ffe0ffe0; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000003f0; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000003f0; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256i_result[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_result[1]) = 0x0218ff78fc38fc38; ++ *((unsigned long *)&__m256i_result[0]) = 0xfc00000000000048; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff0fc00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff0fc00; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfc00000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffc00fffffc00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffc00fffffc00; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000a000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000a000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400000004000; ++ __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffbf4; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffc; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xdbc8000000003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffbff1ffffbff1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffbff1ffffbff1; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffeffc4000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffeffc4000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffeffc4000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffeffc4000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffe06003fc000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000feccfecc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000feccfecc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fe363637fe36364; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe36364661af18f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fe363637fe36364; ++ *((unsigned long *)&__m256i_result[3]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00001ff8d8d90000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001ff8d8d8c000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00001ff8d8d90000; ++ __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c +new file mode 100644 +index 000000000..76651af63 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ 
unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256i_result[1]) = 0x03fc03fc03f803f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe01fe01fe; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fc03e000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fc03e000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00fe01e000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00fe01e000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x07fee332883f86b0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07fed3c8f7ad28d0; ++ *((unsigned long *)&__m256i_result[3]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_result[2]) = 0x3dc02b400a003400; ++ *((unsigned long *)&__m256i_result[1]) = 0x01c03f8034c03200; ++ *((unsigned long *)&__m256i_result[0]) = 0x3dc02b400a003400; ++ __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00aa000000ac00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002a80000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002b0000003f800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002a80000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002b0000003f800; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc1be9e9e9f000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x41d8585858400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1076000016160000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1610000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1076000016160000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1610000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007f00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x311d73ad3ec2064a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001fc000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000c475ceb40000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fb0819280000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004040404000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004040404000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000007c8; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000086000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00040ff288000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000086000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00040ff288000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000001ffe00000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000001ffe00000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ffc8ff88; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ffc8ff88; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0001000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ff91ff100000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ff91ff100000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000008c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000008c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001180000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001180000000; ++ __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c +new file mode 100644 +index 000000000..ca1f5e94f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c +@@ -0,0 +1,455 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff0000ffffffff; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000860601934; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffffffff; ++ __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022222221; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3dddddddfbbb3bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022222221; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3dddddddfbbb3bbc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000f000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffe0df9f8e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c +new file mode 100644 +index 000000000..6864f5eb8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec; ++ *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0d41c9a7bdd239a7; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0b025d0ef8fdf987; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x002944f92da5a708; ++ *((unsigned long *)&__m256i_op1[0]) = 0x038cf4ea999922ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff0000ffff00ff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff000000ffffff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffff00ff; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdb801b6d0962003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0024; ++ *((unsigned long *)&__m256i_op0[1]) = 0x9a7f997fff01ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbe632a4f1c3c5653; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000500000005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202031; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202031; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000040b200002fd4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007fff0000739c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xbc74c3d108e05422; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbc1e3e6a5cace67c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbc74c3d108e0544a; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbc18e696a86565f4; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbc74c3d108e05422; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbc1e3e6a5cace67c; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbc74c3d108e0544a; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbc18e696a86565f4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef87878000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef87878000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000017f00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f03030000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100007fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000003fbfc04; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001fdfe02; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000003fbfc04; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001fdfe02; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010511c54440437; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010511c54440437; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c +new file mode 100644 +index 000000000..7dd2778a5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c +@@ -0,0 +1,548 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_b (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe05fc47b400; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffe06003fc000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_h (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc58a0a0a07070706; ++ *((unsigned long *)&__m256i_op0[2]) = 0x006b60e4180b0023; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1b39153f334b966a; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf1d75d79efcac002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff90ff81; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000045000d0005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000045000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000047000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000004efffe00; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000047000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_w (__m256i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x80000000001529c1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80007073cadc3779; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80000000001529c1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80007073cadc3779; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, 10); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_d (__m256i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c +new file mode 100644 +index 000000000..d93e4314e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00220021004a007e; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ff00; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 
0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000e0e0e0e0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007773; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003373; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c +new file mode 100644 +index 000000000..2bf9ae9c3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc5890a0a07070707; ++ *((unsigned long *)&__m256i_op1[2]) = 0x006be0e4180b8024; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1b399540334c966c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x71d7dd7aefcac001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe651bfff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffbf7f7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe651bfff; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffffe0000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000bf6e0000c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000030000fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000800000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000bf6e0000c916; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000030000fff3; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1cfd000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000017e007ffe02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004500f300fb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x6161616161616161; ++ *((unsigned long *)&__m256i_result[2]) = 0x6161616100000018; ++ *((unsigned long *)&__m256i_result[1]) = 0x6161616161616161; ++ *((unsigned long *)&__m256i_result[0]) = 0x6161616100000018; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f007f0081007f; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01ae00ff00ff; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007000700070007; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d20227a78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x132feeabd2d33b38; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000c0300000019a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0c08032100004044; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000265ffa5a6767; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0c08032100000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f433c78; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00feff0100feff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00feff0100feff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800300000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff801000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800300000000; ++ __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000017fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000017fff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000f00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000f00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c +new file mode 100644 +index 000000000..a51be899b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c +@@ -0,0 +1,504 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x006be0e4180b0024; ++ *((unsigned long *)&__m256i_result[1]) = 0x1b39153f334b166b; ++ *((unsigned long *)&__m256i_result[0]) = 0xf1d7dd7aefcac002; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab; ++ *((unsigned long *)&__m256i_result[3]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_result[2]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_result[1]) = 0x1555156a1555156a; ++ *((unsigned long *)&__m256i_result[0]) = 0x1555156a1555156a; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000bea20000e127; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000c0460000721f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000de910000f010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000006f9; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000bea20; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000c0460; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000de910; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffff800fffff800; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffff800fffff800; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffff800fffff800; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffff800fffff800; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007f017f01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007f017f01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000007f017f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007f017f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned 
long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffd8ffc7ffdaff8a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ 
__m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fff01800fff0181; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fff01800fff0181; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007ff800007ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007ff800007ff80; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffc00000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffc03fffffffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffc00000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffff; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000fef0ff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000fef0ff0; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc008fa01c0090000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f804000c008f404; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc008fa01c0090000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f804000c008f404; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_result[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_result[0]) = 0x001fc0200060047a; ++ __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fffe00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffe00000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long *)&__m256i_result[2]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e9e1e9e1e9e1e9e; ++ *((unsigned long *)&__m256i_result[0]) = 0x1e9e1e9e1e9e1e9e; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffc0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffc0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
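++ /* A minimal scalar sketch of what each 16-bit lane of the xvsrai_h cases
++    above is expected to compute, assuming the intrinsic performs a per-lane
++    arithmetic right shift by the immediate (sign bit replicated, imm 0..15);
++    this helper is illustrative only and is not part of the generated test:
++
++      static inline short srai_h_lane (short x, int imm)
++      {
++        return (short) (x >> imm);
++      }
++
++    e.g. in the preceding case, lanes 0x8000, 0xffff and 0xff80 shifted right
++    by 9 give the sign-extended 0xffc0, 0xffff and 0xffff half-words seen in
++    __m256i_result.  */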
*((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0005fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f004f204f204f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0005fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f004f204f204f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000900000009; ++ __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x761ed60b5d7f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xdc9938afafe904f1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x761ed60b5d7f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdc9938afafe904f1; ++ *((unsigned long *)&__m256i_result[3]) = 0x03b0feb002eb0000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfee401c5fd7f0027; ++ *((unsigned long *)&__m256i_result[1]) = 0x03b0feb002eb0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfee401c5fd7f0027; ++ __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c +new file mode 100644 +index 000000000..e08934b12 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c +@@ -0,0 +1,455 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000013ffffffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000013ffffebd8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000013ffffffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000013ffffebd8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8091811081118110; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80a6802680208015; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8091811081110013; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80a6802680200018; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffefffe0000feff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeff0000007e7f; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c8; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000440800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000440800; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfc01fc0101fe01dd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfc01fc0101fe01dd; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000054; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c +new file mode 100644 +index 000000000..44c20a954 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c +@@ -0,0 +1,545 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000003ffffffff; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe1e800002f03988d; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe1e800002f03988d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff0f400001781cc4; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff0f400001781cc4; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc5c545c545c545c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c4c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc5c545c545c545c5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbc8ff0ffffffcff8; ++ *((unsigned long *)&__m256i_result[3]) = 0xfcfcfcfcfc040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fbfffffc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfcfcfcfcfc040404; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fbfffffc; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x14131211100f0e0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0c0b0a0908070605; ++ *((unsigned long *)&__m256i_op0[1]) = 0x14131211100f0e0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0c0b0a0908070605; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x40); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a542a; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000242; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000242; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0707feb608c9328b; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc237bd65fc892985; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0707feb608c9328b; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc237bd65fc892985; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00150015003a402f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x333568ce26dcd055; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00150015003a402f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x333568ce26dcd055; ++ *((unsigned long *)&__m256i_result[3]) = 0x0e0f1192846ff912; ++ *((unsigned long *)&__m256i_result[2]) = 0x002a0074666a4db9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0e0f1192846ff912; ++ *((unsigned long *)&__m256i_result[0]) = 0x002a0074666a4db9; ++ __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffdfffffffdff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffdfffffffdff; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x37); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3f7f7f7eff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3f7f7f7eff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007efeff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007efeff00; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff3e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff3e; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x70); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000200020018; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000200020008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00c0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0040000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000f0f0003; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000f1003; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fefefe000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fefefe000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01010101010101c9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[3]) = 0x0008080808080808; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008080808080808; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000003c; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x45); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f3009500db00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f3009500db00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000003cc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003cc0; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000400100013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000400100014; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000400100013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000020200000202; ++ *((unsigned long *)&__m256i_result[2]) = 0x4100004141410000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000020200000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4100004141410000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000956a00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000956a00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xb500000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xb500000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe00000ffe00000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe00000ffe00000; ++ __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0x34); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 
0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffc0; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffff80; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000040e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000040e7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000200000000000; ++ __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x21); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3ff9fffa3ff9fffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3ff9fffa3ff9fffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007ff3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007ff3; ++ __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c +new file mode 100644 +index 000000000..fb47385c0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c +@@ -0,0 +1,725 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x38a966b31be83ee9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5f6108dc25b80001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long *)&__m256i_op0[0]) = 0x683b8b67e20c0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000501e99b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000109973de7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001020f22; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x00000001890b7a39; ++ *((unsigned long *)&__m256i_result[3]) = 0x38a966b301f41ffd; ++ *((unsigned long *)&__m256i_result[2]) = 0x5f6108ee13ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf41a56e8d10201f6; ++ *((unsigned long *)&__m256i_result[0]) = 0x683b8b34f1020001; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000707; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010200000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000070300000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01480000052801a2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffdcff64; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000001010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff01ff3400000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff83ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out 
= __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff82037dfd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_result[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_result[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_result[0]) = 0x45baa7ef6a95a985; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000800; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffff0100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00fefffeff02ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000100; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00feff00000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2b2a292827262524; ++ *((unsigned long *)&__m256i_op1[2]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2b2a292827262524; ++ *((unsigned long *)&__m256i_op1[0]) = 0x232221201f1e1d1c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8e8e8e8e8f0e8e8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8e8e8e8e8f0e8e8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000007ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7171717171010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7171717171010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x8e8e8e8e8f00ffff; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffe05f8102; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001607f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001607f0000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00005053000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x00005053000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffa3; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000800200027; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800200028; ++ *((unsigned long *)&__m256i_result[3]) = 
0x006018000000001a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0060401900000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x006018000000001a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0060401900000000; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ffffff1dff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ffffff1dff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff1dffffff1dff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff1dffffff1dff; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff1dffffff1dff; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c +new file mode 100644 +index 000000000..63ba92ead +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c +@@ -0,0 +1,471 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x81f7f2599f0509c2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x51136d3c78388916; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffc0fcffffcf83; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000288a00003c1c; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffe000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100020001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fffffffffffe; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000040000fff8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00007dfd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00007dfd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x14); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x20fc000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x20fc000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f0000007f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f0000007f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000003f8000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x10fbe1e2e0000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x10fbe1e2e0000002; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000040004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000040004; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff8000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x26); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000400000004000; ++ __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff81007fff0100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff81007fff0100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0003fffc0803fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003fffc0803fff8; ++ __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c +new file mode 100644 +index 000000000..c145f7ff3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c +@@ -0,0 +1,500 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000000; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00080000000cc916; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000006fff3; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ffff00ff000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00080005c073c916; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100000007fff3; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00050008000e0010; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0007000800100010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00050008000e0010; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0007000800100010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000002affaa; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff002affaa; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000002affaa; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffd50055; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x002affaa00000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001f0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007f7f00007f00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f00ff00000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000abff0000abff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000abff0000abff; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff800000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000070007000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040403fd03fd040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040403fd03fd040; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffd03fd040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040403fd03fd040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001010000010100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000010100; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000086000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00040ff288000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000086000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00040ff288000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5555555555555555; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fc300000fc40; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc008fa01c0090000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f804000c008f404; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc008fa01c0090000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f804000c008f404; ++ *((unsigned long *)&__m256i_op1[3]) = 0x82ff902d83000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f80000082fe0bd9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x82ff902d83000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f80000082fe0bd9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xc0090000c0200060; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc0090000c0200060; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf3f3f3f3f3f3f4f3; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf3f3f3f3f3f3f4f3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000f3f3f4f3; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000f3f3f4f3; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fff8579f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff8579f; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefe01010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefe01010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefe01010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefe01010101; ++ __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000810001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000810001; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010110; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000104000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000104000200; ++ __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c +new file mode 100644 +index 000000000..b5c0fca74 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c +@@ -0,0 +1,636 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int 
long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000007f007f5; ++ *((unsigned long *)&__m256i_op1[3]) = 0x002e4db200000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000315ac0000d658; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00735278007cf94c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003ed8800031b38; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xfffffff8fc000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ff77fff7ff7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7ff77fff7ff7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000002000000022; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000002000000022; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000016600000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000016600000000; ++ __m256i_out 
= __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x7f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000055; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0xa); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x50); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x20); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00550f0000550f00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000015c015c0; ++ *((unsigned long *)&__m256i_result[2]) = 0xc0c0c0cdc0c0c0cd; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xc0c0c0cdc0c0c0cd; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x00f800f800f800f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0018181800181818; ++ *((unsigned long *)&__m256i_result[1]) = 0x00f800f800f800f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0018181800181818; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xe8001411edf9c0f8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xe80014fdf0e3e428; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff14; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff10003; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff14; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff10003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefee0e3fefefe00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefee0e3fefefe00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000001fffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000001fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000001fffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000001fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x007f0000007f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x007f0000007f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f780000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f80000fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f780000ff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x01c601c6fe3afe3a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x01c601c6fe3afe3a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3f00004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f00004040; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f010700c70106; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f010700c70106; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000010211921; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000010211921; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ffffffffffffffe; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x82ff902d83000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f80000082fe0bd9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x82ff902d83000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f80000082fe0bd9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000080ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000080ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x08000000000000f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x08000000000000f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020000000000000; ++ __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x4); ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op0[2]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_op0[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op0[0]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_op1[3]) = 
0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op1[2]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03af03af03af03af; ++ *((unsigned long *)&__m256i_op1[0]) = 0x03acfc5303260e81; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c +new file mode 100644 +index 000000000..1d591c35c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c +@@ -0,0 +1,650 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++ *((unsigned long *)&__m256i_op1[3]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[2]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_result[0]) = 0x6580668200fe0002; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000003f7e3f; ++ *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0xffffffffffba0c05; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000483800; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00ffffff00ffff; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fe37fe3001d001d; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff0000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000008e4bfc4eff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001ffee10000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000008e4bfc4eff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001ffee10000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d000000000d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0000060d0d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d000000000d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0000060d0d; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffff03ffffff07; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffff03ffffff07; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040004000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040004000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fd02fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405; ++ *((unsigned long *)&__m256i_result[3]) = 0xfe01fe017e81fd02; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000003fc001fe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfe01fe017e81fd02; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000003fc001fe; ++ __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000010000685e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000020a4ffffbe4f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000003ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000003ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ffffffffffff; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000001ffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000001ffff8000; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[2]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[1]) = 0xfd02fd02fd02fd02; ++ *((unsigned long *)&__m256i_result[0]) = 0xfd02fd02fd02fd02; ++ __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0005fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0x04f004f204f204f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0005fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x04f004f204f204f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000002780; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000002780; ++ __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c +new file mode 100644 +index 000000000..e8696701f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c +@@ -0,0 +1,405 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010101110101011; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1111111211111112; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000004444; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x003ffff300000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000001f7f7f; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x9240f24a84b18025; ++ *((unsigned long *)&__m256i_op0[2]) = 0x9240f24a84b18025; ++ *((unsigned long *)&__m256i_op0[1]) = 0xb2c0b341807f8006; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb2c0b341807f8006; ++ *((unsigned long *)&__m256i_result[3]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_result[2]) = 0x009200f200840080; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b200b300800080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00b200b300800080; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffcb423a587053; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6d46f43e71141b81; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffcb423a584528; ++ *((unsigned long *)&__m256i_op0[0]) = 0x9bdf36c8d78158a1; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000007fffe; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000036a37; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000007fffe; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000004def9; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0889088908810881; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0081010000810100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0889088900810088; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0081010000810100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004448444844084; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000408080004080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004448444804080; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000408080004080; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000001d001d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000001d001d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000030003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000030003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000307; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000a0010400a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000a0010400a; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fffffff3fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fffffff3fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fff00003fff; ++ __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fc4; ++ *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fc4; ++ __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c +new file mode 100644 +index 000000000..d54991051 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000003868686a20; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0045b8ae81bce1d8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00386a20b8aee1d8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00386a20b8aee1d8; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2020000020200000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0008000001010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000001010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x88888a6d0962002e; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000007fff01fffb; ++ *((unsigned long *)&__m256i_op0[0]) = 0xdb8e20990cce025a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff3400000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff83ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0962002efe0f0020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01fffb8667012d; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffeffeb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffeffeb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fb7afb62; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffeffebfb7afb62; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff010000ff017e; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x01fe01ae00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000a00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff017e6b803fc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff017e6b803fc0; ++ __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000078100000064; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xa1a1a1a1a1a15e5e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xa1a1a1a1a1a15e5e; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff97a2; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000027; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1716151417161514; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1716151417161514; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1716151417161514; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1716151417161514; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fff0fff0fff0fff; ++ __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c +new file mode 100644 +index 000000000..0fb6483cf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c +@@ -0,0 +1,680 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i 
__m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffc500000002d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000034; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbfa3e127c147721f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1729c173836edfbe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdf91f111808007fb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5ff1f90ffffbf30f; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ff280016; ++ *((unsigned long *)&__m256i_result[2]) = 0xd193a30f94b9b7df; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000001001a; ++ *((unsigned long *)&__m256i_result[0]) = 0xc88840fdf887fd87; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffc5556aaa8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffc5556aaa8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x555555553f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000007070205; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002020100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000007070205; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002020100; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x5980000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x36); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x73); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffe01fe01f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffe01fe01f; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffe01fe01f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffe01fe01f; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fe01020b0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fe01020b0001; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0fff0fff00000020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0fff0fff00000020; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x01fb16ef98f97e90; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x01fb16ef98f97e90; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffa2078fffa2074; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffa2078fffa2074; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x01ff01ff01ff01ff; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003e6c0000cb7a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000401000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003e6c0000cb7a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x40000000b000032d; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x40000000b000032d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f00ff007f00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fc03fc01fc03fc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fc03fc01fc03fc; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ef0120; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ef0120; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff0120; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000e9ec0000e9ec; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff0120; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000e9ec0000e9ec; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffdd001dffe00020; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffdd001dffe00031; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffdd001dffe00020; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffdd001dffe00031; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x3ff73ff83ff73ff8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0003000300030003; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0600060000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0600060000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000007fff8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000007fff8; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1e0000001e002000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1e0000001e002000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff3225; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff3225; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1a19181716151413; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1a19181716151413; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000004442403; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000004442403; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x63); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fef0000ffff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fef0000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0xde00fe0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000fe010000fe01; ++ *((unsigned long *)&__m256i_result[1]) = 0xde00fe0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000fe010000fe01; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000007070707; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff07070707; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000007070707; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff07070707; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x03ff000003ff03ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x03ff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x03ff000003ff03ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x03ff000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h 
(__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x0007ffff0007ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000700000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0007ffff0007ffff; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000e000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000e0000000e00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfc003802fc000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x03802fc000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x03802fc000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x5a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x080808000828082f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0808080008280820; ++ *((unsigned long *)&__m256i_op0[1]) = 0x080808000828082f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0808080008280820; ++ *((unsigned long *)&__m256i_op1[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op1[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op1[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00828082f0808080; ++ *((unsigned long *)&__m256i_result[2]) = 0xf18181818132feea; ++ *((unsigned long *)&__m256i_result[1]) = 0x00828082f0808080; ++ *((unsigned long *)&__m256i_result[0]) = 0xf18181818132feea; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x43); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xfe01fe01fc01fc01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fc01fc01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc01000000003fc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc01000000003fc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000126000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2555205ea7bc4020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000126000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2555205ea7bc4020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[2]) = 0x10ffffff10000006; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0fffffff10000006; ++ *((unsigned long *)&__m256i_op1[0]) = 0x10ffffff10000006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000004980008; ++ *((unsigned long *)&__m256i_result[2]) = 0x003ffffffc400000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000004980008; ++ *((unsigned long *)&__m256i_result[0]) = 0x003ffffffc400000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x46); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00f0000000f00010; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0ff00fff0ff10; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0087ff87f807ff87; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x68); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x50); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf007fe76f008fe19; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf08aff01f07cc291; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf007fe76f008fe19; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf08aff01f07cc291; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000003c01ff9; ++ __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c +new file mode 100644 +index 000000000..22e62a3e7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c +@@ -0,0 +1,515 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x40d74f979f99419f; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff8080000004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff8080000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000200000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0000000000080; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000002222; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003ddd80007bbb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0ea85f60984a8555; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00a21ef3246995f3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1189ce8000fa14ed; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0e459089665f40f3; ++ *((unsigned long *)&__m256i_result[3]) = 0x000100f800000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000f800000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000000000010; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x00003fc00000428a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffc040ffffc09d; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f7f000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f7f000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100010001; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000ff800000; ++ *((unsigned 
long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000ff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000001000100; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000008; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffff80; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000430207f944; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned 
long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff010ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000201; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000201; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff01fb0408; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[2]) = 
0xf2b180c9fc1fefdc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_result[0]) = 0xf2b180c9fc1fefdc; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1c3fc7; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1c3fc7; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6b6b6b6b6b6b6b6b; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d; ++ __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01f010; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01f010; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01f010; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01f010; ++ *((unsigned long *)&__m256i_result[3]) = 0x000078780000f0f1; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000078780000f0f1; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffc00040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffc00040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1080108010060002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1080108010060002; ++ __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c +new file mode 100644 +index 000000000..71f770aff +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000001000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0002fffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0002ff7e8286; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff0002fffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0002ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0202000002020202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202000002010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0202000002020202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202000002020000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_result[3]) = 0x0703030307030203; ++ *((unsigned long *)&__m256i_result[2]) = 0x0703030307030203; ++ *((unsigned long *)&__m256i_result[1]) = 0x0703030307030203; ++ *((unsigned long *)&__m256i_result[0]) = 0x0703030307030203; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f3fc6c68787; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f87870000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003f3fc6c68787; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003f3f87870000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010183f95466; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01010101d58efe94; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000101000083f95; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x00001010000d58f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010002000100020; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020000000200000; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvsrlri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000020000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000040000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000040000000000; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_result[2]) = 0x132feea900000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x04e8296f18181818; ++ *((unsigned long *)&__m256i_result[0]) = 0x132feea900000000; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000001200000011a; ++ *((unsigned long *)&__m256i_result[2]) = 0x2040204020402040; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x000001200000011a; ++ *((unsigned long *)&__m256i_result[0]) = 0x2040204020402040; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffa003e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffb009c; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffa003e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffb009c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020004000400040; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020004000400040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020004000400040; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020004000400040; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000800000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffbfffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffbfffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0102020202010202; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0102020202010202; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000000000000; ++ __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c +new file mode 100644 +index 000000000..cbc1de371 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c +@@ -0,0 +1,410 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i 
= 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6651bfff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202020201010000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000050005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000505; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff820002ff820002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff820002ff820002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00020002ff820002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00020002ff820002; ++ __m256i_out = __lasx_xvsrlrn_w_d 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00020421d7d41124; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00020421d7d41124; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff020000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff020000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fe01fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000007c8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe0000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe0000ff01; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f900000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f900000002; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00043fff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00043fff00000000; ++ __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0xff1cff1b00e300e4; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1b00e300e4; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1b00e300e4; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1b00e30100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x002000000020ffff; ++ __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffdbff980038ffaf; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffafffe80004fff1; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffdbff980038ffaf; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffafffe80004fff1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000020202020202; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000020202020202; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000e3fec0004fff1; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000e3fec0004fff1; ++ __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c +new file mode 100644 +index 000000000..8fc7a0029 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c +@@ -0,0 +1,455 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x7a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000808000008080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000808000008081; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000081; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x68); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a5429; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000801380f380fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000801380f300fb; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f3a40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x42); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf0000000f0000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf0000000f0000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000fe; 
++ *((unsigned long *)&__m256i_op1[2]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x817f11ed81800ff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000004fc480040; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000004fc480040; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000004fc480040; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000004fc480040; ++ __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0004000404040404; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0004000400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x80208020c22080a7; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x80208020c22080a7; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdfc2ff20df80ffa7; 
++ *((unsigned long *)&__m256i_op1[1]) = 0xdf80ff20df80ff20; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdfc2ff20df80ffa7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000840100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xbffebffec0febfff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000840100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xbffebffec0febfff; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffc0c0ffffbfc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f0000400d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f0000400d; ++ *((unsigned long *)&__m256i_result[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x44); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffe00000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000048; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbfffa004fffd8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003f0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00002fffe8013fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003f0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00002fffe8013fff; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000101000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000101000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00010001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x5a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00b2fe28e4420609; ++ *((unsigned long *)&__m256i_op0[2]) = 0x028da7fe15020000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b2fe28e4420609; ++ *((unsigned long *)&__m256i_op0[0]) = 0x028da7fe15020000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002000000; ++ __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000003ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000003ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001ffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_w_d (__m256i_op0, __m256i_op1, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0040000000000000; ++ __m256i_out = __lasx_xvsrlrni_w_d (__m256i_op0, __m256i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001200000012; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fc00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fc00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f880f87e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f880f87e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000; ++ __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000081220000812c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000812000008120; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c +new file mode 100644 +index 000000000..fdb0c25f1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c +@@ -0,0 +1,905 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, 
__m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007ffe81fdfe03; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ffe800000000000; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffef000004ea; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1717171717171717; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000607f700000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffe81; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00f9f90079f9f9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x79f9f9f900000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f007f78; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000033007e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000021; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x00007f7f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007fff; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000080; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000002aaad555; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000002aaad555; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff00000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffc00000ffc0ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcfee0fe00ffe0; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffcfee0fe00ffe0; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000001fff9fff8; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) 
= 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff2400000000ff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffeffe4fffeff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff6400000000ff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffeff66fffeff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fe0100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000016000000480d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000226200005111; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000016000000480d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1131288800000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x1131288800000002; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f3f7f007f1f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f3f7f007f1f; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x007f8080007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000007ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010800; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0xfff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0008; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe27fe2821d226278; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfdfdfdfdfdfdfdfd; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe27fe2821d226278; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080ff0080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff000000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff000000000080; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000007f7f; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fff0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000feff0001ffb8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff1cff18; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff1cff18; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf3ffffffffeffed; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe013fcf2e015fc38; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe013fd00dff78420; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe013fcf2e015fc38; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe013fd00dff78420; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000003fffc0; ++ __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c +new file mode 100644 +index 000000000..dd3c2c6f6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c +@@ -0,0 +1,1235 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f057f0b7f5b007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000007f007f5; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000001fc000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000c475ceb40000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fb0819280000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x074132a240000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003a0200; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000c9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007fff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x37); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff0ffff0000; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000080008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001ffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001ffffff; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned 
long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x73); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100010001000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004; ++ 
__m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x1fe01e0000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x22); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xce7ffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xce7ffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff39ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff39ffffff; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, 
__m256i_op1, 0x5e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe8001b72e0001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xb72e8001b72eaf12; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe000247639d9c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xb5308001b72eaf12; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00001fff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00001fff; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x38f7414938f7882f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x38f7414938f78830; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000801380f380fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000801380f300fb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0303030303020000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0303030303020000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x4d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x59); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd04752cdd5543b56; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6906e68064f3d78b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd04752cdd5543b56; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6906e68064f3d78b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff1100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000004560420; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000fff00004542; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffff00ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000fff00004542; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00c0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0040000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000c0000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000040000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffffe02; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000300000005fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000300000005fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0007fd00000f02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ffffffff00; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000018; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000200000001e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000019; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0004000000030000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000400000003c000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x009c3e201e39e7e3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x87c1135043408bba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x009c3e201e39e7e3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x87c1135043408bba; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001b0b1b4b5dd9f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f5c8f374980; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001b0b1b4b5dd9f; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f5c8f374980; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100007f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100007f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007c7fff00007fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00817fff00810000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007c7fff00007fff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x00817fff00810000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000457d; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000b03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000457d; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000b03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0f000f000f000f00; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0f000f000f000f00; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007fc0083fc7c007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007fc0083fc7c007; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x42); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00067fff00047fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00027fff000080fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00067fff00047fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00027fff000080fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x067f047f027f0080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x067f047f027f0080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0af57272788754ab; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000005e80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0af57272788754ab; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000005e80; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000f0f0f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f0000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000f0f0f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f0000007f; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0400100004001000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0400100004001000; ++ __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op0[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op0[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000700000008; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000700000008; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x55); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xc07f8000c07f8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xc07f8000c07f8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x00000000fff01fe0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000fff01fe0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fe96fe95; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6afc01000001ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fe96fe95; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6afc01000001ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000404; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0404000004040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4000400040004000; ++ *((unsigned long *)&__m256i_result[2]) = 0x4000400040004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x4000400040004000; ++ *((unsigned long *)&__m256i_result[0]) = 0x4000400040004000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c78; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000001ff1; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000001ff1; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x53); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x03fbfffc03fc07fc; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff80000000; ++ __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff003fffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000003fffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffc00fffffc00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffc00fffffc00; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ff007f007f00; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[2]) = 0xc03fc03fc03fc03f; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[0]) = 0xc03fc03fc03fc03f; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff; ++ __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000005; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x60); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0004000500040005; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000fffd0004; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000000f; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f; ++ __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c +new file mode 100644 +index 000000000..7848ddd41 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c +@@ -0,0 +1,905 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2,
int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff80000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007ff90000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000001ff60000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffc2c2ffffc2c2; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x003100310031002f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x003100310031002f; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffefffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000000000002; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000781e0000f221; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff6f20; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000781e0000f221; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbf00bf00bf00bf00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbf84bf00bf00bf0e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f80007fa3; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f670000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f80007fa3; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f670000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000008; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000008; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000408080c111414; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000800400010006d; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000800400010006d; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x02000000fdffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0200000002000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x02000000fdffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000004ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000004ffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3fd1000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff000000ff000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff000000ff000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffb6811fffff80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff97c120000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xdb410010cbe10010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xdb410010cbe10010; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000011ffd97c3; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000019ffdf403; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000011ffd97c3; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x002000000020ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1a1a1a2c1a1a1a2c; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3838383838383838; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffdfffffe00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3838383838383838; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffdfffffe00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000020002000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000020002000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffbffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffbffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_bu_h 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc03b000200020002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000001ec020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000001ec020; ++ __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c +new file mode 100644 +index 000000000..b1c16baf4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c +@@ -0,0 +1,1160 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffc00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000020000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000020000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f20; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000009f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00001f41ffffbf00; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000400000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000010000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100; ++ __m256i_out = 
__lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x5d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf5f5bfbaf5f5bfbe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf5f0bfb8f5d8bfe8; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff5f5c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff5f5c; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[2]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[1]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op0[0]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op1[3]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op1[2]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op1[1]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_op1[0]) = 0x005500550055ffab; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffff6ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffff6ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a09080706050403; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003000200000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003000200000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x1a); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001010300010102; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000410041; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000df93f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000077843; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000003800000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x73); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8001b72e0001b72e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8001b72eaf12d5f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000247639d9cb530; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8001b72eaf12d5f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe056fd9d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffceba70; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00150015003a402f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x333568ce26dcd055; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00150015003a402f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x333568ce26dcd055; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000007d0d0d0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000007d0d0d0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000098; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000040000ffca; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000800000098; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000040000ff79; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff04ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000008000000a; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000008000000a; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x44); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000120e120d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000120e120d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000907; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0016001600160016; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0010002000100020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010002000100020; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffe; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffe000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffe000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x54); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00030006fa05f20e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00030081bd80f90e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000018; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000018; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x02407a3c00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0d0cf2f30d0cf2f3; ++ *((unsigned long *)&__m256i_op0[1]) = 0x02407a3c00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0d0cf2f30d0cf2f3; ++ *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0020000f0000000f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010000f0000000f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff0fff0fff0f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff0fff0fff0f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffff70156; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x74); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xde00fe0000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000fe010000fe01; ++ *((unsigned long *)&__m256i_op0[1]) = 0xde00fe0000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fe010000fe01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000100010001ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00007ff000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00007ff000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x79); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000070007000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x0e0e0e0e0e0e0e0e; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000e0e0e0e0e0e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a1a1a15e5e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a1a1a15e5e; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x45); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffa; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00018069; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0001fffa; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00018069; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000002000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000004000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000004000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000038000000268; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001010101; ++ __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0400000004000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000400; ++ *((unsigned long *)&__m256i_result[1]) = 0x0400000004000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000400; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0080000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x08000000000000f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x08000000000000f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0200000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x2000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x2000000000000000; ++ __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x36); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000040000001b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000040000001b; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f80ffffff808000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f80ffffff808000; ++ __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001e00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000500020002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000700020033; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000500020002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000700020033; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000500020002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000700020033; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000500020002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000700020033; ++ *((unsigned long *)&__m256i_result[3]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x1400080008000000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x1400080008000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1400080008000000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x26);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000001c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000001de;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000001c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000001de;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000060000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000060000000;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x44);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00003fea0014734d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fe900140d85;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00003fea0014734d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fe900140d85;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ff0000ff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff0000ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
+new file mode 100644
+index 000000000..356eb2182
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
+@@ -0,0 +1,965 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x44bb2cd3a35c2fd0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xca355ba46a95e31c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000100ab000500a0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000200b800080124;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001011b000200aa;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00150118008f0091;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f057f0b7f5b007f; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000020000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000020000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffe7fffeffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffd84900000849; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07fffc670800f086; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000017ffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff0ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000017000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001700080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001700080; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2000200020002000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000005536aaaaac; ++ *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000060102150101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000060102150101; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe00000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1cfd000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000003f0000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f7f7f0000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff00000089; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfe7fffecfe7fffec; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffff600000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff000009ec; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffff600000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff000009ec; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8060000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8060000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000010000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000001; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff81ff7dffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff81ff7dffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f017ffd; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000007; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000077fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00011ffb0000bee1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00011ffb0000bee1; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) 
= 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f08181818; ++ *((unsigned long *)&__m256i_op0[2]) = 0x032feea900000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f08181818; ++ *((unsigned long *)&__m256i_op0[0]) = 0x032feea900000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x41cfe01dde000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000013fc03bbc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000013fc03bbc; ++ __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; 
++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000017f00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f03030000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xdf80df80df80dfff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffdf80dfff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000017fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000017fff; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000017fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff000000017fff; ++ __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff010100000001; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c +new file mode 100644 +index 000000000..116bebbb6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c +@@ -0,0 +1,1130 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f28306860663e60; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x40d74f979f99419f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff01fd7fff7fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffe0ffe0ffe0ffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1e1800001e180000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1e18000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000001e18; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffe0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000001e18; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x70); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0383634303836343; ++ *((unsigned long *)&__m256i_result[1]) = 0x1fffffff1fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0383634303836343; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x68); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0036003200360032; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000004; ++ *((unsigned 
long *)&__m256i_op0[2]) = 0x0000bf6e0000c916; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000030000fff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000e00ff00ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f80780000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00001000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00001000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op1[2]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long *)&__m256i_op1[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_op1[0]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000003f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000003f0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000fffc0000fee0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000fe000000ffe0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000003; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe17cec8fe08008ac; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0xe0801f41e0800168; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9240f24a84b18025; ++ *((unsigned long *)&__m256i_op1[2]) = 0x9240f24a84b18025; ++ *((unsigned long *)&__m256i_op1[1]) = 0xb2c0b341807f8006; ++ *((unsigned long *)&__m256i_op1[0]) = 0xb2c0b341807f8006; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000012481e4950; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001658166830; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x77777777f7777777; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf777777777777777; ++ *((unsigned long *)&__m256i_op0[1]) = 0x77777777f7777777; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf777777777777777; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff24; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff24; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404240; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000040404240; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404240; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404240; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007f7f; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00010001000c4411; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100044411; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000002800000010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0002000200020018; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0002000200020008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000c0000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040000000; ++ *((unsigned long *)&__m256i_op1[3]) 
= 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000c0000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000040000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_result[2]) = 0x0003030300000300; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003030300000100; ++ *((unsigned long *)&__m256i_result[0]) = 0x0003030300000100; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000800000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000002000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000800000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003fff00003fff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0007fff8000ffff0; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000030007; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007f7f817f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007f7f817f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4ffc3f783fc040c0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3fc03f803fc040c0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0003fbfc0bfbfc03; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0003fbfc0bfbfc03; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff56ff55ff01ff01; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff56ff55ff01ff01; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007f7f7f7f; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa90896a400000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa90896a400000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7f000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f7f000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff80017fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff80017fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000000; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff810011; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff810011; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff8180ffff8181; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff8180ffff8181; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000008000ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff81ff81; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000008000ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff81ff81; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffebeeaaefafb; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffebeeaaefafb; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0xfffffebeeaaeeeeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x01ffbfff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x03ffffff03ffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x01ffbfff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x03ffffff03ffffff; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x001f001f001f001f; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x61); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0200000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0200000000000000; ++ __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000003030000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000030400; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007000008e700000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007000008e700000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7171717171010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7171717171010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8e8e8e8e8f00ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[2]) = 0xe2e2e202ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ff00; ++ *((unsigned long *)&__m256i_result[0]) = 0xe2e2e202ffffffff; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc800c800c800c800; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8800c800c800c801; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000e0010000e; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000e0010000e; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x4e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x01ff01ff01c0003e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x01ff01ff01c0003e; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x0707070707070707; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0018001800180018; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3000300030003000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3000300030003000; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000598; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002cc0000; ++ *((unsigned long *)&__m256i_result[1]) = 
0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002cc0000; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0002000200010002; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f1d7f7f7f1d7f3b; ++ *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102; ++ *((unsigned long *)&__m256i_result[1]) = 0x7f1d7f7f7f1d7f3b; ++ *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000dfffffff1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000cfffffff3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003f3f00003f3f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000200000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000004000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000004000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000004000000080; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000118; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007efffefffefffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff80fffffffffffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007efffefffefffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff80fffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000e3ab0001352b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000e3ab0001352b; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000038ea4d4a; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff00007fff0000; ++ __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000010000005e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000a400ff004f; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x6); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00011; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00011; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c +new file mode 100644 +index 000000000..977061097 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c +@@ -0,0 +1,815 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++
unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_op0[1]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1515151515151515; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf800f800f800c000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf800f800f800a000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf800f800f800e000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op1[2]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_op1[1]) = 0x5555555536aaaaac; ++ *((unsigned long *)&__m256i_op1[0]) = 0x55555555aaaaaaac; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffc0000fffc0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000200020002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff0004ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff0004ff; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000005be55bd2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_op0[2]) 
= 0x0000000000000010; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0404ffff00000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0404040800000010; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_op1[1]) = 0x007f00f8ff7fff80; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff6a9d8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001b00fd0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000019; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000019; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000070700000707; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000009091b1b1212; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000070700000707; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000009091b1b1212; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000027d00f8; ++ *((unsigned long *)&__m256i_op1[2]) = 0x040204660265fe22; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000027d00f8; ++ *((unsigned long *)&__m256i_op1[0]) = 0x040204660265fe22; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xe273e273e273e273; ++ *((unsigned long *)&__m256i_op0[2]) = 0xe273e273e273e273; ++ *((unsigned long *)&__m256i_op0[1]) = 0xe273e273e273e273; ++ *((unsigned long *)&__m256i_op0[0]) = 0xe273e273e273e273; ++ *((unsigned long *)&__m256i_op1[3]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_op1[1]) = 0xd207e90001fb16ef; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc8eab25698f97e90; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001c4e8ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001c4e8ffffffff; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff0000ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00007f0200007f02; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00007f0200007f02; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0097011900f4009f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003200d4010f0144; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0097011900f301cd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x010b008800f80153; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff810011; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x3fff8000ffa08004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x3fff8000ffa08004; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffbfffa0ffffff80; ++ *((unsigned 
long *)&__m256i_op1[1]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffbfffa0ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff02000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff02000000; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00020001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00020001; ++ __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007f7f7f80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000007f007f007f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000007f007f007f; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff0000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4000c08000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000080c000c080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000404; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff88ffc0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff78ffc0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000002000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000002000000000; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001000100800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000200a000020020; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000200a000020020; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1c3fc7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1c3fc7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002; ++ __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0002000200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0002000200000000; ++ __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000017f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x000000017f7f7f7f; ++ __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf5fffc00fc000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001001900010019; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0a02041904010019; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001001900010019; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0a02041904010019; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000007b007e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000007b007e; ++ __m256i_out = 
__lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c +new file mode 100644 +index 000000000..b55e388b1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c +@@ -0,0 +1,1160 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3133c6409eecf8b0; ++ *((unsigned long *)&__m256i_op0[2]) = 0xddf50db3c617a115; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa432ea5a0913dc8e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x29d403af367b4545; ++ *((unsigned long *)&__m256i_op1[3]) = 0x38a966b31be83ee9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5f6108dc25b8e028; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf41a56e8a20878d7; ++ *((unsigned long *)&__m256i_op1[0]) = 0x683b8b67e20c8ee5; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7ffffffffffff7ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df0d7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x988eb37e000fb33d; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffed95be394b1e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000ffff8000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x06f880008000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x800080008000b8f1; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ff00ff00; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x00000000ff00ff00; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000040100000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000040100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000040100000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000040100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0080200000802000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000f18080010000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 
0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000808080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000808; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000020afefb1; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f350104f7ebffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000003fffc1; ++ *((unsigned long *)&__m256i_op1[0]) = 0x005c0003fff9ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000fe6a021; ++ *((unsigned long *)&__m256i_result[1]) = 0x2000000020000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000b8000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = 
__lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000020001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x4b); 
++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000002020000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000201eff0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000002020000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fef010; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0010000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0010001000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffd; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0e0f1192846ff912; ++ *((unsigned long *)&__m256i_op0[2]) = 0x002a0074666a4db9; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0e0f1192846ff912; ++ *((unsigned long *)&__m256i_op0[0]) = 0x002a0074666a4db9; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000018; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000018; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff05407fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0408040800000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0408040800000004; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001fbfbfc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001fbfbfc; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x62); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fe01020b0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fe01020b0001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000404040; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000404040; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x68); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x003f003f003f003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000010486048c; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000010486048c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x6f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfe7fffecfe7fffec; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0808080808000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0808080808000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_op0[2]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_op0[1]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_op0[0]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_op1[3]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_op1[2]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_op1[1]) = 0xd010101010101010; ++ *((unsigned long *)&__m256i_op1[0]) = 0xd010101010103218; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff8000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff8000; ++ 
*((unsigned long *)&__m256i_op0[0]) = 0xfffffe0000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0020000000200000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbc30c40108a45423; ++ *((unsigned long *)&__m256i_op1[2]) = 0xbc263e0e5d00e69f; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbc30c40108a4544b; ++ *((unsigned long *)&__m256i_op1[0]) = 0xbc20e63aa8b9663f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0504080804030405; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0504060904040305; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0504080804030405; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0504060904040305; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000141020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000141020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000010101010; ++ *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000010101010; ++ *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x1010101010001000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[1]) = 0x1010101000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff800000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff800000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000465; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008d00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008d00000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ 
*((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fe70000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc03ae000ffff6000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc600000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_result[2]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_result[0]) = 0x001fe020001fe020; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f010100000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f010100000101; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000200000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0008000000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0008000000000010; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff; ++ __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000080040; ++ 
*((unsigned long *)&__m256i_result[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000008002d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000008002d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffbfff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x3f7f7f7f407fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x3f7f7f7f407fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fdfdfe; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x07ffffff07ffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x07ffffff08000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x07ffffff08000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x207f207f207f2000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000207f2000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb68380002001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c08000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb68380002001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c08000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001; ++ *((unsigned long *)&__m256i_result[3]) = 
0x0000007fff5b41c0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007fff5b41d0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000007fff5b41c0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000007fff5b41d0; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x59); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010; ++ __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00c00040; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000008000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00c00040; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000008000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0002000200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0002000200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000020002000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000020002000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080; ++ __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f; ++ 
*((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c +new file mode 100644 +index 000000000..ada72a16a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000000001dc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff24; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff24; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned
long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000bdfef907bc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000bdfef907bc; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffc0; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x2b2b2b2b1bd68080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2a2ad4d4f2d8807e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x2b2b2b2b1bd68080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x2a2ad4d4f2d8807e; ++ *((unsigned long *)&__m256i_result[3]) = 0xd4d5d4d5e42a7f80; ++ *((unsigned long *)&__m256i_result[2]) = 0xd5d62b2c0d287f82; ++ *((unsigned long *)&__m256i_result[1]) = 
0xd4d5d4d5e42a7f80; ++ *((unsigned long *)&__m256i_result[0]) = 0xd5d62b2c0d287f82; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffff07b4ffff0707; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000b8070000a787; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffff07b4ffff0707; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000b8070000a787; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffb7650000d496; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001800000018000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffb7650000d496; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001800000018000; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000fc300000fc40; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff000003c0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff000003c0; ++ __m256i_out 
= __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_result[2]) = 0xff81001dff9d003b; ++ *((unsigned long *)&__m256i_result[1]) = 0xff81001dff9dff9e; ++ *((unsigned long *)&__m256i_result[0]) = 0xff81001dff9d003b; ++ __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00fd0101; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00fd0101; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00fd0101; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00fd0101; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x40f69fe73c26f4ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9; ++ *((unsigned long *)&__m256i_result[3]) = 0x40f69fe63c26f4f5; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ff7ffff00000007; ++ *((unsigned long *)&__m256i_result[1]) = 0x40f69fe63c26f4f5; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff7ffff00000007; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000ff00007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000ff00007fff; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010800; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffefef800; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffefef800; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f0060; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f0060; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x4393a0a5bc606060; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43b32feea9000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x4393a0a5bc606060; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43b32feea9000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x3eab77367fff4848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x408480007fff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x04e8296f3c611818; ++ *((unsigned long *)&__m256i_result[2]) = 0x032eafee29010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x04e8296f3c611818; ++ *((unsigned long *)&__m256i_result[0]) = 0x032eafee29010000; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001ff91ff100000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001ff91ff100000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000202; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffff7fff80; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001ff91ff0ffdfe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffff7fff80; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001ff91ff0ffdfe; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff; ++ __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c +new file mode 100644 +index 000000000..f42523850 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c +@@ -0,0 +1,695 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf7fdd5ffebe1c9e3; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf7fdd5ffebe1c9e3; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000002467db99; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000003e143852; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000002467db99; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000003e143852; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffdb982466; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7fdd5ffadcd9191; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffdb982466; ++ *((unsigned long *)&__m256i_result[0]) = 0xf7fdd5ffadcd9191; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fef0000ffff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fef0000ffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f880f87e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f880f87e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000; ++ __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0010511c54440438; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0010511c54440438; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000030b8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000030b8; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100002000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000808000008080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000808000008081; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffec; ++ *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0; ++ *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985; ++ *((unsigned long *)&__m256i_result[3]) = 0x38f7414938f7882f; ++ *((unsigned long *)&__m256i_result[2]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x38f7414938f78830; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0ef; ++ *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0ef; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0ef; ++ *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0ef; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000070f07170; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000070f0f0ef; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000070f07170; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000070f0f0ef; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff; ++ 
*((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000032; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000003c000000032; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000004e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff; ++ __m256i_out = __lasx_xvssub_wu 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0feff00000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0feff00000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x247fe49409620040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x2475cef801f0ffdd; ++ *((unsigned long *)&__m256i_op1[1]) = 0x6580668200fe0002; ++ *((unsigned long *)&__m256i_op1[0]) = 0x419cd5b11c3c5654; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ 
*((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1010100fefefeff0; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0f8f0e8df676f778; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0020000000200000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffdfffffffdfffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffdfffffffdfffff; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffe8ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffe8ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_op1[2]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000c0000005; ++ *((unsigned long *)&__m256i_op1[0]) = 0x21f8c3c4c0000005; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c +new file mode 100644 +index 000000000..3c5e775ff +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c +@@ -0,0 +1,102 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0; ++ __lasx_xvst (__m256i_op0, (unsigned long *)&__m256i_result, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_op0, __m256i_result); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0; ++ __lasx_xvstx (__m256i_op0, (unsigned long *)&__m256i_result, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_op0, __m256i_result); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x8d; ++ *((unsigned long *)&__m256i_out[3]) = 0x0; ++ *((unsigned long *)&__m256i_out[2]) = 0x0; ++ *((unsigned long *)&__m256i_out[1]) = 0x0; ++ *((unsigned long *)&__m256i_out[0]) = 0x0; ++ __lasx_xvstelm_b (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0xe); ++ ASSERTEQ_64 
(__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x9100; ++ *((unsigned long *)&__m256i_out[3]) = 0x0; ++ *((unsigned long *)&__m256i_out[2]) = 0x0; ++ *((unsigned long *)&__m256i_out[1]) = 0x0; ++ *((unsigned long *)&__m256i_out[0]) = 0x0; ++ __lasx_xvstelm_h (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0xe9179100; ++ *((unsigned long *)&__m256i_out[3]) = 0x0; ++ *((unsigned long *)&__m256i_out[2]) = 0x0; ++ *((unsigned long *)&__m256i_out[1]) = 0x0; ++ *((unsigned long *)&__m256i_out[0]) = 0x0; ++ __lasx_xvstelm_w (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb; ++ *((unsigned long *)&__m256i_result[3]) = 0x0; ++ *((unsigned long *)&__m256i_result[2]) = 0x0; ++ *((unsigned long *)&__m256i_result[1]) = 0x0; ++ *((unsigned long *)&__m256i_result[0]) = 0x58569d7be9179100; ++ *((unsigned long *)&__m256i_out[3]) = 0x0; ++ *((unsigned long *)&__m256i_out[2]) = 0x0; ++ *((unsigned long *)&__m256i_out[1]) = 0x0; ++ *((unsigned long *)&__m256i_out[0]) = 0x0; ++ __lasx_xvstelm_d (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c +new file mode 100644 +index 000000000..1a7b0e86f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c +@@ -0,0 +1,14 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler-times "xvstelm.w" 8} } */ ++ ++#define LEN 256 ++ ++float a[LEN], b[LEN], c[LEN]; ++ ++void ++test () ++{ ++ for (int i = 0; i < LEN; i += 2) ++ a[i] = b[i] + c[i]; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c +new file mode 100644 +index 000000000..c1de1e8d3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c +@@ -0,0 +1,590 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, 
__m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010100000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffbe20fc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000001cc7ee87; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000010bb83239; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000c409ed87; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0100020001bf1efd; ++ *((unsigned long *)&__m256i_result[2]) = 0x010002001ec8ec88; ++ *((unsigned long *)&__m256i_result[1]) = 0x010002010db9303a; ++ *((unsigned long *)&__m256i_result[0]) = 0x01000200c60aeb88; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101; 
++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[2]) = 0x0101010200000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[0]) = 0x0101010200000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000040004000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[2]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x01ffff4300ffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0xff00ff003f003f00; ++ *((unsigned long *)&__m256i_result[2]) = 0xff0101fd00010100; ++ *((unsigned long *)&__m256i_result[1]) = 0xff00ff003f003f00; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xff0101fd00010100; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff010000fff9; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff19; ++ *((unsigned long *)&__m256i_result[1]) = 0xff02ff020001fffa; ++ *((unsigned long *)&__m256i_result[0]) = 0x000100010001fffa; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21; ++ *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9; ++ *((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050; ++ *((unsigned long *)&__m256i_result[3]) = 0xbc30c40107d915df; ++ *((unsigned long *)&__m256i_result[2]) = 0xbc263e0e5c80b010; ++ *((unsigned long *)&__m256i_result[1]) = 0xbc30c40107d91607; ++ *((unsigned long *)&__m256i_result[0]) = 0xbc20e63aa8392fb0; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, 
__m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009; ++ *((unsigned long *)&__m256i_result[3]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_result[2]) = 0xff21c241ff21c238; ++ *((unsigned long *)&__m256i_result[1]) = 0xff21c241ff21c241; ++ *((unsigned long *)&__m256i_result[0]) = 0xff21c241ff21c238; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xf7f8f7f8f7f8f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000e000e000e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000e000e000e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000e000e000e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000e000e000e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000e0000000d; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long 
*)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x207f207f207f2000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000207f2000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_result[3]) = 0xdf80df80df80dfff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffdf80dfff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080; ++ __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4; ++ *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c645c5c5c6; ++ *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c5c5c5c5c5; ++ *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c645c5c5c6; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1f0fdf7f3e3b31d4; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_result[2]) = 0x8008000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xe0f02081c1c4ce2c; ++ *((unsigned long *)&__m256i_result[0]) = 0x8008000000000000; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000001c9880; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000001c9880; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffe36780; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000100000001; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00b213171dff0606; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00e9a80014ff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00b213181dff0607; ++ *((unsigned long *)&__m256i_result[2]) = 0x00e9a80114ff0001; ++ *((unsigned long *)&__m256i_result[1]) = 0x00b213181dff0607; ++ *((unsigned long *)&__m256i_result[0]) = 0x00e9a80114ff0001; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fdfdfe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7ffe0001fffe0001; ++ *((unsigned long *)&__m256i_result[2]) = 0x7ffe0001fffeffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000fdfdfe; ++ __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000006f0000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000006f0000007f; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x8080808080808081; ++ *((unsigned long *)&__m256i_result[1]) = 0x8080808080808081; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x017e00ff017e00ff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff017e01fe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[2]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[1]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_op1[0]) = 0xe1616161e1614e60; ++ *((unsigned long *)&__m256i_result[3]) = 0x1f9d9f9d1f9db29f; ++ *((unsigned long *)&__m256i_result[2]) = 0x1f9d9f9d201cb39e; ++ *((unsigned long *)&__m256i_result[1]) = 0x201c9f9d201cb29f; ++ *((unsigned long *)&__m256i_result[0]) = 0x1f9d9f9d201cb39e; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffeffebfb7afb62; ++ *((unsigned long *)&__m256i_op1[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc192181230000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x3e6ce7d9cb7afb62; ++ *((unsigned long *)&__m256i_result[1]) = 0x4010000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x3e6ce7d9cb7afb62; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000013; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffed; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x90007fff90008000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0ffffffe90008000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fffffff80000000; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[2]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[0]) = 0x05ea05ea05ea05ec; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfa15fa15fa15fa14; ++ __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0505070804040404; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0504070804040404; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff000000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff000000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m256i_result[3]) 
= 0x0504080804030405; ++ *((unsigned long *)&__m256i_result[2]) = 0x0504060904040305; ++ *((unsigned long *)&__m256i_result[1]) = 0x0504080804030405; ++ *((unsigned long *)&__m256i_result[0]) = 0x0504060904040305; ++ __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffefff80; ++ __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c +new file mode 100644 +index 000000000..a3c0de6d3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c +@@ -0,0 +1,482 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_result[2]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_result[1]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m256i_result[0]) = 0xe9e9e9e9e9e9e9e9; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m256i_result[3]) = 0xf9f8f9f8f9f9f900; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9e0; ++ *((unsigned long *)&__m256i_result[1]) = 0xf9f8f9f8f9f9f900; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f900; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m256i_op0[1]) = 0x000000000000007f; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xefefefefefefefef; ++ *((unsigned long *)&__m256i_result[2]) = 0xefefefefefefefef; ++ *((unsigned long *)&__m256i_result[1]) = 0xefefefefefefef6e; ++ *((unsigned long *)&__m256i_result[0]) = 0xeeeeeeeeeeeeeeee; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[2]) = 0x6aeaeaeaeaeaeaea; ++ *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[0]) = 0x6aeaeaeaeaeaeaea; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf6f6f6f6f6f6f6f6; ++ *((unsigned long *)&__m256i_result[2]) = 0xf6f6f6f6f6f6f6f6; ++ *((unsigned long *)&__m256i_result[1]) = 0xf6f6f6f6f6f6f6f6; ++ *((unsigned long *)&__m256i_result[0]) = 0xf6f6f6f6f6f6f6f6; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000002a54290; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000002a54290; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[2]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[1]) = 0xe7e7e7e7e7e7e7e7; ++ *((unsigned long *)&__m256i_result[0]) = 0xe7e7e7e7e7e7e7e7; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_result[3]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_result[2]) = 0xdbcbdbcbdbcbdbcb; ++ *((unsigned long *)&__m256i_result[1]) = 0xdbcbdbcbecececec; ++ *((unsigned long *)&__m256i_result[0]) = 0xdbcbdbcbdbcbdbcb; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); 
++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0e0d0c0b0e0d0c0b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0a0908070a090807; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0908070a090807; ++ *((unsigned long *)&__m256i_result[1]) = 0x0a0908070a090807; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0908070a090807; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9; ++ *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[2]) = 0xf2f2f2f2f2f2f2f2; ++ *((unsigned long *)&__m256i_result[1]) = 0xf3f3f3f3f3f3f3f3; ++ *((unsigned long *)&__m256i_result[0]) = 0xf2f2f2f2f2f2f2f2; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb; ++ *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfefefefefdfdfdfd; ++ *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfefefefefdfdfdfd; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xe4e4e4e4e4e4e4e4; ++ *((unsigned long *)&__m256i_result[2]) = 0xe4e4e4e4e4e4e4e4; ++ *((unsigned long *)&__m256i_result[1]) = 0xe4e4e4e4e4e4e4e4; ++ *((unsigned long *)&__m256i_result[0]) = 
0xe4e4e4e4e4e4e4e4; ++ __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff7fff7fff7fff7; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff7fff7fff7fff7; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff7fff7fff7fff7; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff7fff7fff7fff7; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000022be22be; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000022be22be; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fffa2bea2be; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe1ffe1229f229f; ++ *((unsigned long *)&__m256i_result[2]) = 0x7fe07fe0a29fa29f; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe1ffe1229f229f; ++ *((unsigned long *)&__m256i_result[0]) = 0x7fe07fe0a29fa29f; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe5ffe5ffe5ffe5; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe5ffe5ffe5ffe5; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff1fff1fff1fff1; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffcfffcfffcfffc; ++ __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_result[2]) = 
0xffffffef000004ea; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffefffffffef; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000018; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000018; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff30000000b; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff3fffffff3; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffff5fffffff5; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff5fffffff5; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffff5fffffff5; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff5fffffff5; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe5ffffffe5; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe5ffffffe5; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffeaffffffea; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffeaffffffea; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffeaffffffea; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffeaffffffea; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long *)&__m256i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5d20a0895d20a089; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[1]) = 0x5d20a0895d20a089; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe8ffffffe8; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe8ffffffe8; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe8ffffffe8; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffcfffffffc; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffcfffffffc; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffcfffffffc; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffcfffffffc; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffeb683007ffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c0df5b41cf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffeb683007ffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c0df5b41cf; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffe97a1df5b41b0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffeb664007ffd61; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffe97a1df5b41b0; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe7ffffffe7; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe7ffffffe7; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000400000003ffb; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000400100004001; ++ *((unsigned long *)&__m256i_result[3]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003ff000003ff0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00003fef00003fea; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003ff000003ff0; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffe4ffffffe4; ++ __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffefb; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffefb; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffc0008001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffffc0008001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffc0008001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffc0008001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffffc0007fe9; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffffc0007fe9; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffffc0007fe9; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffffc0007fe9; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff6; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffee; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe6; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffe6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe6; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffe6; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x1a); ++ 
ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffe1; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100080; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000010006d; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000006d; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000010006d; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffef; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffee; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff4; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffed; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffed; ++ __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c +new file mode 100644 +index 000000000..caa72ca61 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[2]) = 
0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffb10001ff8f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001004c0001ff87; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffb10001ff8f; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001004c0001ff87; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff7; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ff02ff80fede; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ff02ff80fede; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffe00800022; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040; ++ *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffc0; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fff0ffc0; ++ *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffc0; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0ffc0; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fff00017fff0000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffe4ffffffe4; ++ *((unsigned long 
*)&__m256i_result[3]) = 0x0000001d0000001c; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000001d0000001c; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000001d0000001c; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001c; ++ __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffeff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffeff00000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010203; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffcfa; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000102; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000fffffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffefd; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101; ++ *((unsigned long 
*)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000008080809; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000008080809; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000008080809; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000008080809; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000300000003; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffd; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffd; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffd; ++ *((unsigned long 
*)&__m256i_result[0]) = 0xfffffffffffffffd; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffff1cff18; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffff1cff1c; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffff1cff18; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000001400; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000003c01ff9; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffec00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffc3fe007; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffc3fe007; ++ __m256i_out = __lasx_xvsubwev_d_w 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010000; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000010100000102; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010100000102; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x007fffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x007fffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007fffff007fffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00c200c200c200c2; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00c200c200c200bb; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffbdff3cffbdff44; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffbdff3cffbdff44; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c +new file mode 100644 +index 000000000..57d883c04 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c +@@ -0,0 +1,440 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_h_bu 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000007f0000007f; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000007f0000007f; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff80ff01ff80; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff800000007e; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0043030300400300; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0043030300400300; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0043030300400100; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0043030300400100; ++ *((unsigned long *)&__m256i_result[3]) = 0xffdd001dffe00020; ++ *((unsigned long *)&__m256i_result[2]) = 0xffdd001dffe00031; ++ *((unsigned long *)&__m256i_result[1]) = 0xffdd001dffe00020; ++ *((unsigned long *)&__m256i_result[0]) = 0xffdd001dffe00031; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000001ffe2000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x001fe020001fe020; ++ *((unsigned long *)&__m256i_result[3]) = 0x00ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe1ffe0ffe1ffe0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00ff0020ff1f001f; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe1ffe0ffe1ffe0; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffee00ba; ++ *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80008000fff98000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00ff00fffff500ba; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00ff00fffff500ba; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000047000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000004efffe00; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000047000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000fffc0000fffc; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000fffc0000fffc; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001504f4c4b2361; 
++ *((unsigned long *)&__m256i_op0[2]) = 0x303338a48f374969; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001504f4c4b2361; ++ *((unsigned long *)&__m256i_op0[0]) = 0x303338a48f374969; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0; ++ *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_result[2]) = 0xffff47b4ffff5879; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000504fffff3271; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff47b4ffff5879; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000; ++ __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc; ++ *((unsigned 
long *)&__m256i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffbf4; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000006; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000308; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000010100000102; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000010100000102; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffefd; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffefd; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40; ++ *((unsigned long *)&__m256i_op1[3]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x80000000ffff8c80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x80000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x80000000fff0e400; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000000f1a40; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000003effe1; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000003effe1; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000003effe1; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000003effe1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00010001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffff7; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffff7; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0002; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0002; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0002; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0002; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000; ++ __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x5d20a0a15d20a0a1; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000010000000001; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffeffffff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffeffffff00; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000100; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000100; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0040000000000003; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001; ++ __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c +new file mode 100644 +index 000000000..1687729d3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c +@@ -0,0 +1,695 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000; ++ *((unsigned 
long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19; ++ *((unsigned long *)&__m256i_result[3]) = 0xffe4ffe6ffe5ffe6; ++ *((unsigned long *)&__m256i_result[2]) = 0xffe4ffe6ffe5ffe6; ++ *((unsigned long *)&__m256i_result[1]) = 0xffe4ffe6ffe5ffe6; ++ *((unsigned long *)&__m256i_result[0]) = 0xffe4ffe6ffe5ffe6; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x017e01fe01fe01fe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0586060601fe0202; ++ *((unsigned long *)&__m256i_op1[1]) = 0x017e01fe01fe0000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0586060601fe0004; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffbfffafffffffe; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffbfffaffff0000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffefffefffefffef; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff01; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff6; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100008000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100008000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100007fff; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009; ++ *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff8000; ++ *((unsigned 
long *)&__m256i_result[2]) = 0x000043efffff8000; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000043efffff8000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000003f00001f63; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000003f00001f63; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000400080ffc080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff80ff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff80ff; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xc3030000ff800000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[2]) = 0x00003cfc0000006f; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m256i_result[0]) = 0x00003cfc0000006f; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op1[2]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff6361; ++ *((unsigned long *)&__m256i_op1[0]) = 0x4d0a902890b800dc; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffb2f600006f48; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffb2f600006f48; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000001fffe; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000001fffe; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000060000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000060000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000000000017e; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000020202020; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000020202020; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w 
(__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, 
__m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ff40; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ff0100090040; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff02; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff02; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000700000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe00; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe00; ++ __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1e17ffffd0fc6772; ++ *((unsigned long *)&__m256i_op1[2]) = 0x1e17ffffebf6ded2; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1e17ffffd0fc6772; ++ *((unsigned long *)&__m256i_op1[0]) = 0x1e17ffffebf6ded2; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xe1e800002f03988d; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xe1e800002f03988d; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x6300000000000001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x6300000000000001; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000808; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f8; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x000e000e000e000e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000e000e000e000e; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0a0a000000000a0a; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0a0a000000000a0a; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c +new file mode 100644 +index 000000000..8d6ed92a1 +--- 
/dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c +@@ -0,0 +1,620 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000007dfdff4b; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff3400000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ff83ff01; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000ff010000ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000ff010000ff01; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000ff010000ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000ff010000ff01; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000; ++ *((unsigned long 
*)&__m256i_op0[0]) = 0xefdfefdfefdfefdf; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff0fff0; ++ *((unsigned long *)&__m256i_result[1]) = 0xfff0fff0ff01ff01; ++ *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff0fff0; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op1[2]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_op1[1]) = 0xdf80df80df80df80; ++ *((unsigned long *)&__m256i_op1[0]) = 0xdfc2df80df80df87; ++ *((unsigned long *)&__m256i_result[3]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256i_result[2]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256i_result[1]) = 0xff21ff21ff21ff21; ++ *((unsigned long *)&__m256i_result[0]) = 0xff21ff21ff21ff21; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002; ++ *((unsigned long *)&__m256i_op0[2]) = 0x4079808280057efe; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007ffcfcfd020202; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x004000800080007e; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000fc00fd0002; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m256i_result[3]) = 0xff01ff0100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xff01ff0100000000; ++ *((unsigned long *)&__m256i_result[1]) = 0xff01ff0100000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xff01ff0100000000; ++ __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffff000100000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff; ++ *((unsigned long *)&__m256i_result[1]) = 0xffff7fff00007f00; ++ *((unsigned long *)&__m256i_result[0]) = 0xffff000100007fff; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000b8f81b8c840e4; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_result[2]) = 0xfffffff5ffff4738; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffb3b4; ++ *((unsigned long *)&__m256i_result[0]) = 0xfffffff5ffff4738; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x00009fff9ffffd80; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff20010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x00009fff9ffffd80; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff20010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00002080df5b41cf; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00002080df5b41cf; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000009fff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff40a6; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000009fff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff40a6; ++ __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00007fffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00007fffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff8001; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x020afefb08140000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0003fffc00060000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000; ++ *((unsigned long 
*)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff0001ff02; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff020afefc; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x000000000003fefd; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32; ++ *((unsigned long *)&__m256i_op1[3]) = 0x1514151415141514; ++ *((unsigned long *)&__m256i_op1[2]) = 0x151415141514e335; ++ *((unsigned long *)&__m256i_op1[1]) = 0x1514151415141514; ++ *((unsigned long *)&__m256i_op1[0]) = 0x151415141514e335; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000e9ece9ec; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000e9ece9ec; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0008000800080008; ++ *((unsigned long *)&__m256i_op0[2]) = 0x000c005e000c0029; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0004005600040020; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000300000002; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000060008; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000000c005b; ++ *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffe0000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000040053; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00003f784000ff80; ++ *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f84000fff9; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00003f784000ff80; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000003f78; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000f7f8f7f8; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000003f78; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7000700070007000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000070007000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7000700070007000; ++ *((unsigned long 
*)&__m256i_result[3]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff8fff9000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff8fff9000; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffff37b737b8; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff77b737b8; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffff37b737b8; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff77b737b8; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e; ++ *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff457db03f; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000457db03e; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff457db03f; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001; ++ *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[2]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362; ++ *((unsigned long *)&__m256i_op1[0]) = 0x000b2673a90896a4; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0xffffafafb3b3dc9d; ++ *((unsigned long 
*)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0xffffafafb3b3dc9d; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000008050501; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000008050501; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000029170; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000001fff000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000001fff000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000090b0906; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c +new file mode 100644 +index 000000000..18b36c873 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c +@@ -0,0 +1,185 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_op1[2]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_op1[1]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_result[2]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_result[1]) = 0x7be2468acf15f39c; ++ *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xff3eff3eff3eff3e; ++ *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x00c100c100c100c1; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) =
0x00c100c100c100c1; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[2]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[2]) = 0x0100000001000100; ++ *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[0]) = 0x0100000001000100; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000f91; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f90; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000f90; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = 
__lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1; ++ *((unsigned long *)&__m256i_op0[2]) = 0x6040190d20227a78; ++ *((unsigned long *)&__m256i_op0[1]) = 0x132feeabd2d33b38; ++ *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x9fe7fffffffff32e; ++ *((unsigned long *)&__m256i_result[2]) = 0x6040190ddfdd8587; ++ *((unsigned long *)&__m256i_result[1]) = 0xecd011542d2cc4c7; ++ *((unsigned long *)&__m256i_result[0]) = 0x6040190dffffffff; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000; ++ __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ 
++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c +new file mode 100644 +index 000000000..8fd6298f7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c +@@ -0,0 +1,163 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lasxintrin.h> ++ ++int ++main () ++{ ++ __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result; ++ __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result; ++ __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000005e02; ++ *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c29cc0; ++ *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2; ++ *((unsigned long *)&__m256i_result[0]) = 0xc2c2c2c2c2c29cc0; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xc2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x1616161616161616; ++ *((unsigned long *)&__m256i_op0[2]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x7ffe16167f161616; ++ *((unsigned long *)&__m256i_op0[0]) = 0x161616167fffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xc7c7c7c7c7c7c7c7; ++ *((unsigned long *)&__m256i_result[2]) = 0xc7c7c7c7ae2e2e2e; ++ *((unsigned long *)&__m256i_result[1]) = 0xae2fc7c7aec7c7c7; ++ *((unsigned long *)&__m256i_result[0]) = 0xc7c7c7c7ae2e2e2e; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xd1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_result[2]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_result[1]) = 0x5353535353535353; ++ *((unsigned long *)&__m256i_result[0]) = 0x5353535353535353; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x53); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long *)&__m256i_result[2]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long *)&__m256i_result[1]) = 0x6d6d6d6d6d6d6d6d; ++ *((unsigned long *)&__m256i_result[0]) = 0x6d6d6d6d6d6d6d6d; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) =
0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_result[2]) = 0x8e8e8e8e8e8e8e8e; ++ *((unsigned long *)&__m256i_result[1]) = 0x7171717171717171; ++ *((unsigned long *)&__m256i_result[0]) = 0x8e8e8e8e8e8e8e8e; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x71); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[2]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575; ++ *((unsigned long *)&__m256i_result[0]) = 0x7575757575757575; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x75); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xa4a4a4a4a4a4a4a4; ++ *((unsigned long *)&__m256i_result[2]) = 0xa4a4a4a4a4a4a4a4; ++ *((unsigned long *)&__m256i_result[1]) = 0xa4a4a4a4a4a4a4a4; ++ *((unsigned long *)&__m256i_result[0]) = 0xa4a4a4a4a4a4a4a4; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa4); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_result[2]) = 0xa1a1a1a15e5e5e5e; ++ *((unsigned long *)&__m256i_result[1]) = 0xa1a1a1a1a1a1a1a1; ++ *((unsigned long *)&__m256i_result[0]) = 0xa1a1a1a15e5e5e5e; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa1); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m256i_result[3]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256i_result[2]) = 0x8d8d72728d8d8d8d; ++ *((unsigned long *)&__m256i_result[1]) = 0x8d8d72728d8d7272; ++ *((unsigned long *)&__m256i_result[0]) = 0x8d8d72728d8d8d8d; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x8d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_result[3]) = 0xb3b3b3b3b3b3b3b3; ++ *((unsigned long *)&__m256i_result[2]) = 0xb3b3b3b3b3b3b3b3; ++ *((unsigned long *)&__m256i_result[1]) = 0xb3b3b3b3b3b3b3b3; ++ *((unsigned long *)&__m256i_result[0]) = 0xb3b3b3b3b3b3b3b3; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x4c); ++ ASSERTEQ_64 (__LINE__, 
__m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x007f0000ff807f81; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff800000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x007f0000ff807f81; ++ *((unsigned long *)&__m256i_result[3]) = 0x5d5d5d5d5d22a2a2; ++ *((unsigned long *)&__m256i_result[2]) = 0xa2dda2a25d22dd23; ++ *((unsigned long *)&__m256i_result[1]) = 0x5d5d5d5d5d22a2a2; ++ *((unsigned long *)&__m256i_result[0]) = 0xa2dda2a25d22dd23; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa2); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m256i_result[3]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256i_result[2]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256i_result[1]) = 0xd3d3d3d3d3d3d3d3; ++ *((unsigned long *)&__m256i_result[0]) = 0xd3d3d3d3d3d3d3d3; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xd3); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[2]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m256i_op0[0]) = 0xfa15fa15fa15fa14; ++ *((unsigned long *)&__m256i_result[3]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_result[2]) = 0x8768876887688769; ++ *((unsigned long *)&__m256i_result[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m256i_result[0]) = 0x8768876887688769; ++ __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x7d); ++ ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp +new file mode 100644 +index 000000000..2c37aa91d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp +@@ -0,0 +1,42 @@ ++#Copyright(C) 2023 Free Software Foundation, Inc. ++ ++#This program is free software; you can redistribute it and / or modify ++#it under the terms of the GNU General Public License as published by ++#the Free Software Foundation; either version 3 of the License, or ++#(at your option) any later version. ++# ++#This program is distributed in the hope that it will be useful, ++#but WITHOUT ANY WARRANTY; without even the implied warranty of ++#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the ++#GNU General Public License for more details. ++# ++#You should have received a copy of the GNU General Public License ++#along with GCC; see the file COPYING3.If not see ++# <http://www.gnu.org/licenses/>. ++ ++#GCC testsuite that uses the `dg.exp' driver. ++ ++#Exit immediately if this isn't a LoongArch target. ++if ![istarget loongarch*-*-*] then { ++ return ++} ++ ++#Load support procs. ++load_lib gcc-dg.exp ++ ++#If a testcase doesn't have special options, use these. ++global DEFAULT_CFLAGS ++if ![info exists DEFAULT_CFLAGS] then { ++ set DEFAULT_CFLAGS " " ++} ++ ++#Initialize `dg'. ++dg-init ++ ++#Main loop. ++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lsx/*.\[cS\]]] \ ++ " -mlsx" $DEFAULT_CFLAGS ++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lasx/*.\[cS\]]] \ ++ " -mlasx" $DEFAULT_CFLAGS ++# All done.
++dg-finish +diff --git a/gcc/testsuite/gcc.target/loongarch/lsx-builtin.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c +similarity index 51% +rename from gcc/testsuite/gcc.target/loongarch/lsx-builtin.c +rename to gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c +index 296869dc5..13013114d 100644 +--- a/gcc/testsuite/gcc.target/loongarch/lsx-builtin.c ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c +@@ -718,744 +718,3611 @@ + /* { dg-final { scan-assembler-times "lsx_vrepli_h:.*vrepli\\.h.*lsx_vrepli_h" 1 } } */ + /* { dg-final { scan-assembler-times "lsx_vrepli_w:.*vrepli\\.w.*lsx_vrepli_w" 1 } } */ + +-typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16))); +-typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1))); +-typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16))); +-typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1))); +-typedef short v8i16 __attribute__ ((vector_size(16), aligned(16))); +-typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2))); +-typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16))); +-typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2))); +-typedef int v4i32 __attribute__ ((vector_size(16), aligned(16))); +-typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4))); +-typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16))); +-typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4))); +-typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16))); +-typedef long long v2i64_d __attribute__ ((vector_size(16), aligned(8))); +-typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16))); +-typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8))); +-typedef float v4f32 __attribute__ ((vector_size(16), aligned(16))); +-typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4))); +-typedef double v2f64 __attribute__ ((vector_size(16), aligned(16))); +-typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8))); ++typedef signed char v16i8 __attribute__ ((vector_size (16), aligned (16))); ++typedef signed char v16i8_b __attribute__ ((vector_size (16), aligned (1))); ++typedef unsigned char v16u8 __attribute__ ((vector_size (16), aligned (16))); ++typedef unsigned char v16u8_b __attribute__ ((vector_size (16), aligned (1))); ++typedef short v8i16 __attribute__ ((vector_size (16), aligned (16))); ++typedef short v8i16_h __attribute__ ((vector_size (16), aligned (2))); ++typedef unsigned short v8u16 __attribute__ ((vector_size (16), aligned (16))); ++typedef unsigned short v8u16_h __attribute__ ((vector_size (16), aligned (2))); ++typedef int v4i32 __attribute__ ((vector_size (16), aligned (16))); ++typedef int v4i32_w __attribute__ ((vector_size (16), aligned (4))); ++typedef unsigned int v4u32 __attribute__ ((vector_size (16), aligned (16))); ++typedef unsigned int v4u32_w __attribute__ ((vector_size (16), aligned (4))); ++typedef long long v2i64 __attribute__ ((vector_size (16), aligned (16))); ++typedef long long v2i64_d __attribute__ ((vector_size (16), aligned (8))); ++typedef unsigned long long v2u64 ++ __attribute__ ((vector_size (16), aligned (16))); ++typedef unsigned long long v2u64_d ++ __attribute__ ((vector_size (16), aligned (8))); ++typedef float v4f32 __attribute__ ((vector_size (16), aligned (16))); ++typedef float v4f32_w __attribute__ ((vector_size (16), aligned 
(4))); ++typedef double v2f64 __attribute__ ((vector_size (16), aligned (16))); ++typedef double v2f64_d __attribute__ ((vector_size (16), aligned (8))); + +-typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__)); ++typedef long long __m128i ++ __attribute__ ((__vector_size__ (16), __may_alias__)); + typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__)); + typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__)); + +-v16i8 __lsx_vsll_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsll_b(_1, _2);} +-v8i16 __lsx_vsll_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsll_h(_1, _2);} +-v4i32 __lsx_vsll_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsll_w(_1, _2);} +-v2i64 __lsx_vsll_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsll_d(_1, _2);} +-v16i8 __lsx_vslli_b(v16i8 _1){return __builtin_lsx_vslli_b(_1, 1);} +-v8i16 __lsx_vslli_h(v8i16 _1){return __builtin_lsx_vslli_h(_1, 1);} +-v4i32 __lsx_vslli_w(v4i32 _1){return __builtin_lsx_vslli_w(_1, 1);} +-v2i64 __lsx_vslli_d(v2i64 _1){return __builtin_lsx_vslli_d(_1, 1);} +-v16i8 __lsx_vsra_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsra_b(_1, _2);} +-v8i16 __lsx_vsra_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsra_h(_1, _2);} +-v4i32 __lsx_vsra_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsra_w(_1, _2);} +-v2i64 __lsx_vsra_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsra_d(_1, _2);} +-v16i8 __lsx_vsrai_b(v16i8 _1){return __builtin_lsx_vsrai_b(_1, 1);} +-v8i16 __lsx_vsrai_h(v8i16 _1){return __builtin_lsx_vsrai_h(_1, 1);} +-v4i32 __lsx_vsrai_w(v4i32 _1){return __builtin_lsx_vsrai_w(_1, 1);} +-v2i64 __lsx_vsrai_d(v2i64 _1){return __builtin_lsx_vsrai_d(_1, 1);} +-v16i8 __lsx_vsrar_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrar_b(_1, _2);} +-v8i16 __lsx_vsrar_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrar_h(_1, _2);} +-v4i32 __lsx_vsrar_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrar_w(_1, _2);} +-v2i64 __lsx_vsrar_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrar_d(_1, _2);} +-v16i8 __lsx_vsrari_b(v16i8 _1){return __builtin_lsx_vsrari_b(_1, 1);} +-v8i16 __lsx_vsrari_h(v8i16 _1){return __builtin_lsx_vsrari_h(_1, 1);} +-v4i32 __lsx_vsrari_w(v4i32 _1){return __builtin_lsx_vsrari_w(_1, 1);} +-v2i64 __lsx_vsrari_d(v2i64 _1){return __builtin_lsx_vsrari_d(_1, 1);} +-v16i8 __lsx_vsrl_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrl_b(_1, _2);} +-v8i16 __lsx_vsrl_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrl_h(_1, _2);} +-v4i32 __lsx_vsrl_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrl_w(_1, _2);} +-v2i64 __lsx_vsrl_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrl_d(_1, _2);} +-v16i8 __lsx_vsrli_b(v16i8 _1){return __builtin_lsx_vsrli_b(_1, 1);} +-v8i16 __lsx_vsrli_h(v8i16 _1){return __builtin_lsx_vsrli_h(_1, 1);} +-v4i32 __lsx_vsrli_w(v4i32 _1){return __builtin_lsx_vsrli_w(_1, 1);} +-v2i64 __lsx_vsrli_d(v2i64 _1){return __builtin_lsx_vsrli_d(_1, 1);} +-v16i8 __lsx_vsrlr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlr_b(_1, _2);} +-v8i16 __lsx_vsrlr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlr_h(_1, _2);} +-v4i32 __lsx_vsrlr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlr_w(_1, _2);} +-v2i64 __lsx_vsrlr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlr_d(_1, _2);} +-v16i8 __lsx_vsrlri_b(v16i8 _1){return __builtin_lsx_vsrlri_b(_1, 1);} +-v8i16 __lsx_vsrlri_h(v8i16 _1){return __builtin_lsx_vsrlri_h(_1, 1);} +-v4i32 __lsx_vsrlri_w(v4i32 _1){return __builtin_lsx_vsrlri_w(_1, 1);} +-v2i64 __lsx_vsrlri_d(v2i64 _1){return __builtin_lsx_vsrlri_d(_1, 1);} +-v16u8 __lsx_vbitclr_b(v16u8 _1, v16u8 _2){return 
__builtin_lsx_vbitclr_b(_1, _2);} +-v8u16 __lsx_vbitclr_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitclr_h(_1, _2);} +-v4u32 __lsx_vbitclr_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitclr_w(_1, _2);} +-v2u64 __lsx_vbitclr_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vbitclr_d(_1, _2);} +-v16u8 __lsx_vbitclri_b(v16u8 _1){return __builtin_lsx_vbitclri_b(_1, 1);} +-v8u16 __lsx_vbitclri_h(v8u16 _1){return __builtin_lsx_vbitclri_h(_1, 1);} +-v4u32 __lsx_vbitclri_w(v4u32 _1){return __builtin_lsx_vbitclri_w(_1, 1);} +-v2u64 __lsx_vbitclri_d(v2u64 _1){return __builtin_lsx_vbitclri_d(_1, 1);} +-v16u8 __lsx_vbitset_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitset_b(_1, _2);} +-v8u16 __lsx_vbitset_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitset_h(_1, _2);} +-v4u32 __lsx_vbitset_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitset_w(_1, _2);} +-v2u64 __lsx_vbitset_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vbitset_d(_1, _2);} +-v16u8 __lsx_vbitseti_b(v16u8 _1){return __builtin_lsx_vbitseti_b(_1, 1);} +-v8u16 __lsx_vbitseti_h(v8u16 _1){return __builtin_lsx_vbitseti_h(_1, 1);} +-v4u32 __lsx_vbitseti_w(v4u32 _1){return __builtin_lsx_vbitseti_w(_1, 1);} +-v2u64 __lsx_vbitseti_d(v2u64 _1){return __builtin_lsx_vbitseti_d(_1, 1);} +-v16u8 __lsx_vbitrev_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitrev_b(_1, _2);} +-v8u16 __lsx_vbitrev_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vbitrev_h(_1, _2);} +-v4u32 __lsx_vbitrev_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vbitrev_w(_1, _2);} +-v2u64 __lsx_vbitrev_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vbitrev_d(_1, _2);} +-v16u8 __lsx_vbitrevi_b(v16u8 _1){return __builtin_lsx_vbitrevi_b(_1, 1);} +-v8u16 __lsx_vbitrevi_h(v8u16 _1){return __builtin_lsx_vbitrevi_h(_1, 1);} +-v4u32 __lsx_vbitrevi_w(v4u32 _1){return __builtin_lsx_vbitrevi_w(_1, 1);} +-v2u64 __lsx_vbitrevi_d(v2u64 _1){return __builtin_lsx_vbitrevi_d(_1, 1);} +-v16i8 __lsx_vadd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vadd_b(_1, _2);} +-v8i16 __lsx_vadd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vadd_h(_1, _2);} +-v4i32 __lsx_vadd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vadd_w(_1, _2);} +-v2i64 __lsx_vadd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vadd_d(_1, _2);} +-v16i8 __lsx_vaddi_bu(v16i8 _1){return __builtin_lsx_vaddi_bu(_1, 1);} +-v8i16 __lsx_vaddi_hu(v8i16 _1){return __builtin_lsx_vaddi_hu(_1, 1);} +-v4i32 __lsx_vaddi_wu(v4i32 _1){return __builtin_lsx_vaddi_wu(_1, 1);} +-v2i64 __lsx_vaddi_du(v2i64 _1){return __builtin_lsx_vaddi_du(_1, 1);} +-v16i8 __lsx_vsub_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsub_b(_1, _2);} +-v8i16 __lsx_vsub_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsub_h(_1, _2);} +-v4i32 __lsx_vsub_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsub_w(_1, _2);} +-v2i64 __lsx_vsub_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsub_d(_1, _2);} +-v16i8 __lsx_vsubi_bu(v16i8 _1){return __builtin_lsx_vsubi_bu(_1, 1);} +-v8i16 __lsx_vsubi_hu(v8i16 _1){return __builtin_lsx_vsubi_hu(_1, 1);} +-v4i32 __lsx_vsubi_wu(v4i32 _1){return __builtin_lsx_vsubi_wu(_1, 1);} +-v2i64 __lsx_vsubi_du(v2i64 _1){return __builtin_lsx_vsubi_du(_1, 1);} +-v16i8 __lsx_vmax_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmax_b(_1, _2);} +-v8i16 __lsx_vmax_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmax_h(_1, _2);} +-v4i32 __lsx_vmax_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmax_w(_1, _2);} +-v2i64 __lsx_vmax_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmax_d(_1, _2);} +-v16i8 __lsx_vmaxi_b(v16i8 _1){return __builtin_lsx_vmaxi_b(_1, 1);} +-v8i16 __lsx_vmaxi_h(v8i16 _1){return __builtin_lsx_vmaxi_h(_1, 1);} +-v4i32 
__lsx_vmaxi_w(v4i32 _1){return __builtin_lsx_vmaxi_w(_1, 1);} +-v2i64 __lsx_vmaxi_d(v2i64 _1){return __builtin_lsx_vmaxi_d(_1, 1);} +-v16u8 __lsx_vmax_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmax_bu(_1, _2);} +-v8u16 __lsx_vmax_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmax_hu(_1, _2);} +-v4u32 __lsx_vmax_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmax_wu(_1, _2);} +-v2u64 __lsx_vmax_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmax_du(_1, _2);} +-v16u8 __lsx_vmaxi_bu(v16u8 _1){return __builtin_lsx_vmaxi_bu(_1, 1);} +-v8u16 __lsx_vmaxi_hu(v8u16 _1){return __builtin_lsx_vmaxi_hu(_1, 1);} +-v4u32 __lsx_vmaxi_wu(v4u32 _1){return __builtin_lsx_vmaxi_wu(_1, 1);} +-v2u64 __lsx_vmaxi_du(v2u64 _1){return __builtin_lsx_vmaxi_du(_1, 1);} +-v16i8 __lsx_vmin_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmin_b(_1, _2);} +-v8i16 __lsx_vmin_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmin_h(_1, _2);} +-v4i32 __lsx_vmin_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmin_w(_1, _2);} +-v2i64 __lsx_vmin_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmin_d(_1, _2);} +-v16i8 __lsx_vmini_b(v16i8 _1){return __builtin_lsx_vmini_b(_1, 1);} +-v8i16 __lsx_vmini_h(v8i16 _1){return __builtin_lsx_vmini_h(_1, 1);} +-v4i32 __lsx_vmini_w(v4i32 _1){return __builtin_lsx_vmini_w(_1, 1);} +-v2i64 __lsx_vmini_d(v2i64 _1){return __builtin_lsx_vmini_d(_1, 1);} +-v16u8 __lsx_vmin_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmin_bu(_1, _2);} +-v8u16 __lsx_vmin_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmin_hu(_1, _2);} +-v4u32 __lsx_vmin_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmin_wu(_1, _2);} +-v2u64 __lsx_vmin_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmin_du(_1, _2);} +-v16u8 __lsx_vmini_bu(v16u8 _1){return __builtin_lsx_vmini_bu(_1, 1);} +-v8u16 __lsx_vmini_hu(v8u16 _1){return __builtin_lsx_vmini_hu(_1, 1);} +-v4u32 __lsx_vmini_wu(v4u32 _1){return __builtin_lsx_vmini_wu(_1, 1);} +-v2u64 __lsx_vmini_du(v2u64 _1){return __builtin_lsx_vmini_du(_1, 1);} +-v16i8 __lsx_vseq_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vseq_b(_1, _2);} +-v8i16 __lsx_vseq_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vseq_h(_1, _2);} +-v4i32 __lsx_vseq_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vseq_w(_1, _2);} +-v2i64 __lsx_vseq_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vseq_d(_1, _2);} +-v16i8 __lsx_vseqi_b(v16i8 _1){return __builtin_lsx_vseqi_b(_1, 1);} +-v8i16 __lsx_vseqi_h(v8i16 _1){return __builtin_lsx_vseqi_h(_1, 1);} +-v4i32 __lsx_vseqi_w(v4i32 _1){return __builtin_lsx_vseqi_w(_1, 1);} +-v2i64 __lsx_vseqi_d(v2i64 _1){return __builtin_lsx_vseqi_d(_1, 1);} +-v16i8 __lsx_vslti_b(v16i8 _1){return __builtin_lsx_vslti_b(_1, 1);} +-v16i8 __lsx_vslt_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vslt_b(_1, _2);} +-v8i16 __lsx_vslt_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vslt_h(_1, _2);} +-v4i32 __lsx_vslt_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vslt_w(_1, _2);} +-v2i64 __lsx_vslt_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vslt_d(_1, _2);} +-v8i16 __lsx_vslti_h(v8i16 _1){return __builtin_lsx_vslti_h(_1, 1);} +-v4i32 __lsx_vslti_w(v4i32 _1){return __builtin_lsx_vslti_w(_1, 1);} +-v2i64 __lsx_vslti_d(v2i64 _1){return __builtin_lsx_vslti_d(_1, 1);} +-v16i8 __lsx_vslt_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vslt_bu(_1, _2);} +-v8i16 __lsx_vslt_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vslt_hu(_1, _2);} +-v4i32 __lsx_vslt_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vslt_wu(_1, _2);} +-v2i64 __lsx_vslt_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vslt_du(_1, _2);} +-v16i8 __lsx_vslti_bu(v16u8 _1){return __builtin_lsx_vslti_bu(_1, 1);} +-v8i16 
__lsx_vslti_hu(v8u16 _1){return __builtin_lsx_vslti_hu(_1, 1);} +-v4i32 __lsx_vslti_wu(v4u32 _1){return __builtin_lsx_vslti_wu(_1, 1);} +-v2i64 __lsx_vslti_du(v2u64 _1){return __builtin_lsx_vslti_du(_1, 1);} +-v16i8 __lsx_vsle_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsle_b(_1, _2);} +-v8i16 __lsx_vsle_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsle_h(_1, _2);} +-v4i32 __lsx_vsle_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsle_w(_1, _2);} +-v2i64 __lsx_vsle_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsle_d(_1, _2);} +-v16i8 __lsx_vslei_b(v16i8 _1){return __builtin_lsx_vslei_b(_1, 1);} +-v8i16 __lsx_vslei_h(v8i16 _1){return __builtin_lsx_vslei_h(_1, 1);} +-v4i32 __lsx_vslei_w(v4i32 _1){return __builtin_lsx_vslei_w(_1, 1);} +-v2i64 __lsx_vslei_d(v2i64 _1){return __builtin_lsx_vslei_d(_1, 1);} +-v16i8 __lsx_vsle_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsle_bu(_1, _2);} +-v8i16 __lsx_vsle_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsle_hu(_1, _2);} +-v4i32 __lsx_vsle_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsle_wu(_1, _2);} +-v2i64 __lsx_vsle_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsle_du(_1, _2);} +-v16i8 __lsx_vslei_bu(v16u8 _1){return __builtin_lsx_vslei_bu(_1, 1);} +-v8i16 __lsx_vslei_hu(v8u16 _1){return __builtin_lsx_vslei_hu(_1, 1);} +-v4i32 __lsx_vslei_wu(v4u32 _1){return __builtin_lsx_vslei_wu(_1, 1);} +-v2i64 __lsx_vslei_du(v2u64 _1){return __builtin_lsx_vslei_du(_1, 1);} +-v16i8 __lsx_vsat_b(v16i8 _1){return __builtin_lsx_vsat_b(_1, 1);} +-v8i16 __lsx_vsat_h(v8i16 _1){return __builtin_lsx_vsat_h(_1, 1);} +-v4i32 __lsx_vsat_w(v4i32 _1){return __builtin_lsx_vsat_w(_1, 1);} +-v2i64 __lsx_vsat_d(v2i64 _1){return __builtin_lsx_vsat_d(_1, 1);} +-v16u8 __lsx_vsat_bu(v16u8 _1){return __builtin_lsx_vsat_bu(_1, 1);} +-v8u16 __lsx_vsat_hu(v8u16 _1){return __builtin_lsx_vsat_hu(_1, 1);} +-v4u32 __lsx_vsat_wu(v4u32 _1){return __builtin_lsx_vsat_wu(_1, 1);} +-v2u64 __lsx_vsat_du(v2u64 _1){return __builtin_lsx_vsat_du(_1, 1);} +-v16i8 __lsx_vadda_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vadda_b(_1, _2);} +-v8i16 __lsx_vadda_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vadda_h(_1, _2);} +-v4i32 __lsx_vadda_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vadda_w(_1, _2);} +-v2i64 __lsx_vadda_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vadda_d(_1, _2);} +-v16i8 __lsx_vsadd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsadd_b(_1, _2);} +-v8i16 __lsx_vsadd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsadd_h(_1, _2);} +-v4i32 __lsx_vsadd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsadd_w(_1, _2);} +-v2i64 __lsx_vsadd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsadd_d(_1, _2);} +-v16u8 __lsx_vsadd_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsadd_bu(_1, _2);} +-v8u16 __lsx_vsadd_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsadd_hu(_1, _2);} +-v4u32 __lsx_vsadd_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsadd_wu(_1, _2);} +-v2u64 __lsx_vsadd_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsadd_du(_1, _2);} +-v16i8 __lsx_vavg_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vavg_b(_1, _2);} +-v8i16 __lsx_vavg_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vavg_h(_1, _2);} +-v4i32 __lsx_vavg_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vavg_w(_1, _2);} +-v2i64 __lsx_vavg_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vavg_d(_1, _2);} +-v16u8 __lsx_vavg_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vavg_bu(_1, _2);} +-v8u16 __lsx_vavg_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vavg_hu(_1, _2);} +-v4u32 __lsx_vavg_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vavg_wu(_1, _2);} +-v2u64 __lsx_vavg_du(v2u64 _1, v2u64 
_2){return __builtin_lsx_vavg_du(_1, _2);} +-v16i8 __lsx_vavgr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vavgr_b(_1, _2);} +-v8i16 __lsx_vavgr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vavgr_h(_1, _2);} +-v4i32 __lsx_vavgr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vavgr_w(_1, _2);} +-v2i64 __lsx_vavgr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vavgr_d(_1, _2);} +-v16u8 __lsx_vavgr_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vavgr_bu(_1, _2);} +-v8u16 __lsx_vavgr_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vavgr_hu(_1, _2);} +-v4u32 __lsx_vavgr_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vavgr_wu(_1, _2);} +-v2u64 __lsx_vavgr_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vavgr_du(_1, _2);} +-v16i8 __lsx_vssub_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vssub_b(_1, _2);} +-v8i16 __lsx_vssub_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssub_h(_1, _2);} +-v4i32 __lsx_vssub_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssub_w(_1, _2);} +-v2i64 __lsx_vssub_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssub_d(_1, _2);} +-v16u8 __lsx_vssub_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vssub_bu(_1, _2);} +-v8u16 __lsx_vssub_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vssub_hu(_1, _2);} +-v4u32 __lsx_vssub_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vssub_wu(_1, _2);} +-v2u64 __lsx_vssub_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vssub_du(_1, _2);} +-v16i8 __lsx_vabsd_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vabsd_b(_1, _2);} +-v8i16 __lsx_vabsd_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vabsd_h(_1, _2);} +-v4i32 __lsx_vabsd_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vabsd_w(_1, _2);} +-v2i64 __lsx_vabsd_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vabsd_d(_1, _2);} +-v16u8 __lsx_vabsd_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vabsd_bu(_1, _2);} +-v8u16 __lsx_vabsd_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vabsd_hu(_1, _2);} +-v4u32 __lsx_vabsd_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vabsd_wu(_1, _2);} +-v2u64 __lsx_vabsd_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vabsd_du(_1, _2);} +-v16i8 __lsx_vmul_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmul_b(_1, _2);} +-v8i16 __lsx_vmul_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmul_h(_1, _2);} +-v4i32 __lsx_vmul_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmul_w(_1, _2);} +-v2i64 __lsx_vmul_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmul_d(_1, _2);} +-v16i8 __lsx_vmadd_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmadd_b(_1, _2, _3);} +-v8i16 __lsx_vmadd_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmadd_h(_1, _2, _3);} +-v4i32 __lsx_vmadd_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmadd_w(_1, _2, _3);} +-v2i64 __lsx_vmadd_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmadd_d(_1, _2, _3);} +-v16i8 __lsx_vmsub_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmsub_b(_1, _2, _3);} +-v8i16 __lsx_vmsub_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmsub_h(_1, _2, _3);} +-v4i32 __lsx_vmsub_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmsub_w(_1, _2, _3);} +-v2i64 __lsx_vmsub_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmsub_d(_1, _2, _3);} +-v16i8 __lsx_vdiv_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vdiv_b(_1, _2);} +-v8i16 __lsx_vdiv_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vdiv_h(_1, _2);} +-v4i32 __lsx_vdiv_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vdiv_w(_1, _2);} +-v2i64 __lsx_vdiv_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vdiv_d(_1, _2);} +-v16u8 __lsx_vdiv_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vdiv_bu(_1, _2);} +-v8u16 __lsx_vdiv_hu(v8u16 _1, v8u16 _2){return 
__builtin_lsx_vdiv_hu(_1, _2);} +-v4u32 __lsx_vdiv_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vdiv_wu(_1, _2);} +-v2u64 __lsx_vdiv_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vdiv_du(_1, _2);} +-v8i16 __lsx_vhaddw_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vhaddw_h_b(_1, _2);} +-v4i32 __lsx_vhaddw_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vhaddw_w_h(_1, _2);} +-v2i64 __lsx_vhaddw_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vhaddw_d_w(_1, _2);} +-v8u16 __lsx_vhaddw_hu_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vhaddw_hu_bu(_1, _2);} +-v4u32 __lsx_vhaddw_wu_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vhaddw_wu_hu(_1, _2);} +-v2u64 __lsx_vhaddw_du_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vhaddw_du_wu(_1, _2);} +-v8i16 __lsx_vhsubw_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vhsubw_h_b(_1, _2);} +-v4i32 __lsx_vhsubw_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vhsubw_w_h(_1, _2);} +-v2i64 __lsx_vhsubw_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vhsubw_d_w(_1, _2);} +-v8i16 __lsx_vhsubw_hu_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vhsubw_hu_bu(_1, _2);} +-v4i32 __lsx_vhsubw_wu_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vhsubw_wu_hu(_1, _2);} +-v2i64 __lsx_vhsubw_du_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vhsubw_du_wu(_1, _2);} +-v16i8 __lsx_vmod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmod_b(_1, _2);} +-v8i16 __lsx_vmod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmod_h(_1, _2);} +-v4i32 __lsx_vmod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmod_w(_1, _2);} +-v2i64 __lsx_vmod_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmod_d(_1, _2);} +-v16u8 __lsx_vmod_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmod_bu(_1, _2);} +-v8u16 __lsx_vmod_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmod_hu(_1, _2);} +-v4u32 __lsx_vmod_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmod_wu(_1, _2);} +-v2u64 __lsx_vmod_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmod_du(_1, _2);} +-v16i8 __lsx_vreplve_b(v16i8 _1, int _2){return __builtin_lsx_vreplve_b(_1, _2);} +-v8i16 __lsx_vreplve_h(v8i16 _1, int _2){return __builtin_lsx_vreplve_h(_1, _2);} +-v4i32 __lsx_vreplve_w(v4i32 _1, int _2){return __builtin_lsx_vreplve_w(_1, _2);} +-v2i64 __lsx_vreplve_d(v2i64 _1, int _2){return __builtin_lsx_vreplve_d(_1, _2);} +-v16i8 __lsx_vreplvei_b(v16i8 _1){return __builtin_lsx_vreplvei_b(_1, 1);} +-v8i16 __lsx_vreplvei_h(v8i16 _1){return __builtin_lsx_vreplvei_h(_1, 1);} +-v4i32 __lsx_vreplvei_w(v4i32 _1){return __builtin_lsx_vreplvei_w(_1, 1);} +-v2i64 __lsx_vreplvei_d(v2i64 _1){return __builtin_lsx_vreplvei_d(_1, 1);} +-v16i8 __lsx_vpickev_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpickev_b(_1, _2);} +-v8i16 __lsx_vpickev_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpickev_h(_1, _2);} +-v4i32 __lsx_vpickev_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpickev_w(_1, _2);} +-v2i64 __lsx_vpickev_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpickev_d(_1, _2);} +-v16i8 __lsx_vpickod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpickod_b(_1, _2);} +-v8i16 __lsx_vpickod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpickod_h(_1, _2);} +-v4i32 __lsx_vpickod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpickod_w(_1, _2);} +-v2i64 __lsx_vpickod_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpickod_d(_1, _2);} +-v16i8 __lsx_vilvh_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vilvh_b(_1, _2);} +-v8i16 __lsx_vilvh_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vilvh_h(_1, _2);} +-v4i32 __lsx_vilvh_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vilvh_w(_1, _2);} +-v2i64 __lsx_vilvh_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vilvh_d(_1, _2);} +-v16i8 
__lsx_vilvl_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vilvl_b(_1, _2);} +-v8i16 __lsx_vilvl_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vilvl_h(_1, _2);} +-v4i32 __lsx_vilvl_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vilvl_w(_1, _2);} +-v2i64 __lsx_vilvl_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vilvl_d(_1, _2);} +-v16i8 __lsx_vpackev_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpackev_b(_1, _2);} +-v8i16 __lsx_vpackev_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpackev_h(_1, _2);} +-v4i32 __lsx_vpackev_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpackev_w(_1, _2);} +-v2i64 __lsx_vpackev_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpackev_d(_1, _2);} +-v16i8 __lsx_vpackod_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vpackod_b(_1, _2);} +-v8i16 __lsx_vpackod_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vpackod_h(_1, _2);} +-v4i32 __lsx_vpackod_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpackod_w(_1, _2);} +-v2i64 __lsx_vpackod_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vpackod_d(_1, _2);} +-v8i16 __lsx_vshuf_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vshuf_h(_1, _2, _3);} +-v4i32 __lsx_vshuf_w(v4i32 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vshuf_w(_1, _2, _3);} +-v2i64 __lsx_vshuf_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vshuf_d(_1, _2, _3);} +-v16u8 __lsx_vand_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vand_v(_1, _2);} +-v16u8 __lsx_vandi_b(v16u8 _1){return __builtin_lsx_vandi_b(_1, 1);} +-v16u8 __lsx_vor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vor_v(_1, _2);} +-v16u8 __lsx_vori_b(v16u8 _1){return __builtin_lsx_vori_b(_1, 1);} +-v16u8 __lsx_vnor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vnor_v(_1, _2);} +-v16u8 __lsx_vnori_b(v16u8 _1){return __builtin_lsx_vnori_b(_1, 1);} +-v16u8 __lsx_vxor_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vxor_v(_1, _2);} +-v16u8 __lsx_vxori_b(v16u8 _1){return __builtin_lsx_vxori_b(_1, 1);} +-v16u8 __lsx_vbitsel_v(v16u8 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vbitsel_v(_1, _2, _3);} +-v16u8 __lsx_vbitseli_b(v16u8 _1, v16u8 _2){return __builtin_lsx_vbitseli_b(_1, _2, 1);} +-v16i8 __lsx_vshuf4i_b(v16i8 _1){return __builtin_lsx_vshuf4i_b(_1, 1);} +-v8i16 __lsx_vshuf4i_h(v8i16 _1){return __builtin_lsx_vshuf4i_h(_1, 1);} +-v4i32 __lsx_vshuf4i_w(v4i32 _1){return __builtin_lsx_vshuf4i_w(_1, 1);} +-v16i8 __lsx_vreplgr2vr_b(int _1){return __builtin_lsx_vreplgr2vr_b(_1);} +-v8i16 __lsx_vreplgr2vr_h(int _1){return __builtin_lsx_vreplgr2vr_h(_1);} +-v4i32 __lsx_vreplgr2vr_w(int _1){return __builtin_lsx_vreplgr2vr_w(_1);} +-v2i64 __lsx_vreplgr2vr_d(long _1){return __builtin_lsx_vreplgr2vr_d(_1);} +-v16i8 __lsx_vpcnt_b(v16i8 _1){return __builtin_lsx_vpcnt_b(_1);} +-v8i16 __lsx_vpcnt_h(v8i16 _1){return __builtin_lsx_vpcnt_h(_1);} +-v4i32 __lsx_vpcnt_w(v4i32 _1){return __builtin_lsx_vpcnt_w(_1);} +-v2i64 __lsx_vpcnt_d(v2i64 _1){return __builtin_lsx_vpcnt_d(_1);} +-v16i8 __lsx_vclo_b(v16i8 _1){return __builtin_lsx_vclo_b(_1);} +-v8i16 __lsx_vclo_h(v8i16 _1){return __builtin_lsx_vclo_h(_1);} +-v4i32 __lsx_vclo_w(v4i32 _1){return __builtin_lsx_vclo_w(_1);} +-v2i64 __lsx_vclo_d(v2i64 _1){return __builtin_lsx_vclo_d(_1);} +-v16i8 __lsx_vclz_b(v16i8 _1){return __builtin_lsx_vclz_b(_1);} +-v8i16 __lsx_vclz_h(v8i16 _1){return __builtin_lsx_vclz_h(_1);} +-v4i32 __lsx_vclz_w(v4i32 _1){return __builtin_lsx_vclz_w(_1);} +-v2i64 __lsx_vclz_d(v2i64 _1){return __builtin_lsx_vclz_d(_1);} +-int __lsx_vpickve2gr_b(v16i8 _1){return __builtin_lsx_vpickve2gr_b(_1, 1);} +-int __lsx_vpickve2gr_h(v8i16 _1){return __builtin_lsx_vpickve2gr_h(_1, 1);} +-int 
__lsx_vpickve2gr_w(v4i32 _1){return __builtin_lsx_vpickve2gr_w(_1, 1);} +-long __lsx_vpickve2gr_d(v2i64 _1){return __builtin_lsx_vpickve2gr_d(_1, 1);} +-unsigned int __lsx_vpickve2gr_bu(v16i8 _1){return __builtin_lsx_vpickve2gr_bu(_1, 1);} +-unsigned int __lsx_vpickve2gr_hu(v8i16 _1){return __builtin_lsx_vpickve2gr_hu(_1, 1);} +-unsigned int __lsx_vpickve2gr_wu(v4i32 _1){return __builtin_lsx_vpickve2gr_wu(_1, 1);} +-unsigned long int __lsx_vpickve2gr_du(v2i64 _1){return __builtin_lsx_vpickve2gr_du(_1, 1);} +-v16i8 __lsx_vinsgr2vr_b(v16i8 _1){return __builtin_lsx_vinsgr2vr_b(_1, 1, 1);} +-v8i16 __lsx_vinsgr2vr_h(v8i16 _1){return __builtin_lsx_vinsgr2vr_h(_1, 1, 1);} +-v4i32 __lsx_vinsgr2vr_w(v4i32 _1){return __builtin_lsx_vinsgr2vr_w(_1, 1, 1);} +-v2i64 __lsx_vinsgr2vr_d(v2i64 _1){return __builtin_lsx_vinsgr2vr_d(_1, 1, 1);} +-v4f32 __lsx_vfadd_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfadd_s(_1, _2);} +-v2f64 __lsx_vfadd_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfadd_d(_1, _2);} +-v4f32 __lsx_vfsub_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfsub_s(_1, _2);} +-v2f64 __lsx_vfsub_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfsub_d(_1, _2);} +-v4f32 __lsx_vfmul_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmul_s(_1, _2);} +-v2f64 __lsx_vfmul_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmul_d(_1, _2);} +-v4f32 __lsx_vfdiv_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfdiv_s(_1, _2);} +-v2f64 __lsx_vfdiv_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfdiv_d(_1, _2);} +-v8i16 __lsx_vfcvt_h_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcvt_h_s(_1, _2);} +-v4f32 __lsx_vfcvt_s_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcvt_s_d(_1, _2);} +-v4f32 __lsx_vfmin_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmin_s(_1, _2);} +-v2f64 __lsx_vfmin_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmin_d(_1, _2);} +-v4f32 __lsx_vfmina_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmina_s(_1, _2);} +-v2f64 __lsx_vfmina_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmina_d(_1, _2);} +-v4f32 __lsx_vfmax_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmax_s(_1, _2);} +-v2f64 __lsx_vfmax_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmax_d(_1, _2);} +-v4f32 __lsx_vfmaxa_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfmaxa_s(_1, _2);} +-v2f64 __lsx_vfmaxa_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfmaxa_d(_1, _2);} +-v4i32 __lsx_vfclass_s(v4f32 _1){return __builtin_lsx_vfclass_s(_1);} +-v2i64 __lsx_vfclass_d(v2f64 _1){return __builtin_lsx_vfclass_d(_1);} +-v4f32 __lsx_vfsqrt_s(v4f32 _1){return __builtin_lsx_vfsqrt_s(_1);} +-v2f64 __lsx_vfsqrt_d(v2f64 _1){return __builtin_lsx_vfsqrt_d(_1);} +-v4f32 __lsx_vfrecip_s(v4f32 _1){return __builtin_lsx_vfrecip_s(_1);} +-v2f64 __lsx_vfrecip_d(v2f64 _1){return __builtin_lsx_vfrecip_d(_1);} +-v4f32 __lsx_vfrint_s(v4f32 _1){return __builtin_lsx_vfrint_s(_1);} +-v2f64 __lsx_vfrint_d(v2f64 _1){return __builtin_lsx_vfrint_d(_1);} +-v4f32 __lsx_vfrsqrt_s(v4f32 _1){return __builtin_lsx_vfrsqrt_s(_1);} +-v2f64 __lsx_vfrsqrt_d(v2f64 _1){return __builtin_lsx_vfrsqrt_d(_1);} +-v4f32 __lsx_vflogb_s(v4f32 _1){return __builtin_lsx_vflogb_s(_1);} +-v2f64 __lsx_vflogb_d(v2f64 _1){return __builtin_lsx_vflogb_d(_1);} +-v4f32 __lsx_vfcvth_s_h(v8i16 _1){return __builtin_lsx_vfcvth_s_h(_1);} +-v2f64 __lsx_vfcvth_d_s(v4f32 _1){return __builtin_lsx_vfcvth_d_s(_1);} +-v4f32 __lsx_vfcvtl_s_h(v8i16 _1){return __builtin_lsx_vfcvtl_s_h(_1);} +-v2f64 __lsx_vfcvtl_d_s(v4f32 _1){return __builtin_lsx_vfcvtl_d_s(_1);} +-v4i32 __lsx_vftint_w_s(v4f32 _1){return __builtin_lsx_vftint_w_s(_1);} +-v2i64 __lsx_vftint_l_d(v2f64 
_1){return __builtin_lsx_vftint_l_d(_1);} +-v4u32 __lsx_vftint_wu_s(v4f32 _1){return __builtin_lsx_vftint_wu_s(_1);} +-v2u64 __lsx_vftint_lu_d(v2f64 _1){return __builtin_lsx_vftint_lu_d(_1);} +-v4i32 __lsx_vftintrz_w_s(v4f32 _1){return __builtin_lsx_vftintrz_w_s(_1);} +-v2i64 __lsx_vftintrz_l_d(v2f64 _1){return __builtin_lsx_vftintrz_l_d(_1);} +-v4u32 __lsx_vftintrz_wu_s(v4f32 _1){return __builtin_lsx_vftintrz_wu_s(_1);} +-v2u64 __lsx_vftintrz_lu_d(v2f64 _1){return __builtin_lsx_vftintrz_lu_d(_1);} +-v4f32 __lsx_vffint_s_w(v4i32 _1){return __builtin_lsx_vffint_s_w(_1);} +-v2f64 __lsx_vffint_d_l(v2i64 _1){return __builtin_lsx_vffint_d_l(_1);} +-v4f32 __lsx_vffint_s_wu(v4u32 _1){return __builtin_lsx_vffint_s_wu(_1);} +-v2f64 __lsx_vffint_d_lu(v2u64 _1){return __builtin_lsx_vffint_d_lu(_1);} +-v16u8 __lsx_vandn_v(v16u8 _1, v16u8 _2){return __builtin_lsx_vandn_v(_1, _2);} +-v16i8 __lsx_vneg_b(v16i8 _1){return __builtin_lsx_vneg_b(_1);} +-v8i16 __lsx_vneg_h(v8i16 _1){return __builtin_lsx_vneg_h(_1);} +-v4i32 __lsx_vneg_w(v4i32 _1){return __builtin_lsx_vneg_w(_1);} +-v2i64 __lsx_vneg_d(v2i64 _1){return __builtin_lsx_vneg_d(_1);} +-v16i8 __lsx_vmuh_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmuh_b(_1, _2);} +-v8i16 __lsx_vmuh_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmuh_h(_1, _2);} +-v4i32 __lsx_vmuh_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmuh_w(_1, _2);} +-v2i64 __lsx_vmuh_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmuh_d(_1, _2);} +-v16u8 __lsx_vmuh_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmuh_bu(_1, _2);} +-v8u16 __lsx_vmuh_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmuh_hu(_1, _2);} +-v4u32 __lsx_vmuh_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmuh_wu(_1, _2);} +-v2u64 __lsx_vmuh_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmuh_du(_1, _2);} +-v8i16 __lsx_vsllwil_h_b(v16i8 _1){return __builtin_lsx_vsllwil_h_b(_1, 1);} +-v4i32 __lsx_vsllwil_w_h(v8i16 _1){return __builtin_lsx_vsllwil_w_h(_1, 1);} +-v2i64 __lsx_vsllwil_d_w(v4i32 _1){return __builtin_lsx_vsllwil_d_w(_1, 1);} +-v8u16 __lsx_vsllwil_hu_bu(v16u8 _1){return __builtin_lsx_vsllwil_hu_bu(_1, 1);} +-v4u32 __lsx_vsllwil_wu_hu(v8u16 _1){return __builtin_lsx_vsllwil_wu_hu(_1, 1);} +-v2u64 __lsx_vsllwil_du_wu(v4u32 _1){return __builtin_lsx_vsllwil_du_wu(_1, 1);} +-v16i8 __lsx_vsran_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsran_b_h(_1, _2);} +-v8i16 __lsx_vsran_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsran_h_w(_1, _2);} +-v4i32 __lsx_vsran_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsran_w_d(_1, _2);} +-v16i8 __lsx_vssran_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssran_b_h(_1, _2);} +-v8i16 __lsx_vssran_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssran_h_w(_1, _2);} +-v4i32 __lsx_vssran_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssran_w_d(_1, _2);} +-v16u8 __lsx_vssran_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssran_bu_h(_1, _2);} +-v8u16 __lsx_vssran_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssran_hu_w(_1, _2);} +-v4u32 __lsx_vssran_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssran_wu_d(_1, _2);} +-v16i8 __lsx_vsrarn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrarn_b_h(_1, _2);} +-v8i16 __lsx_vsrarn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrarn_h_w(_1, _2);} +-v4i32 __lsx_vsrarn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrarn_w_d(_1, _2);} +-v16i8 __lsx_vssrarn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrarn_b_h(_1, _2);} +-v8i16 __lsx_vssrarn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrarn_h_w(_1, _2);} +-v4i32 __lsx_vssrarn_w_d(v2i64 _1, v2i64 _2){return 
__builtin_lsx_vssrarn_w_d(_1, _2);} +-v16u8 __lsx_vssrarn_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrarn_bu_h(_1, _2);} +-v8u16 __lsx_vssrarn_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrarn_hu_w(_1, _2);} +-v4u32 __lsx_vssrarn_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrarn_wu_d(_1, _2);} +-v16i8 __lsx_vsrln_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrln_b_h(_1, _2);} +-v8i16 __lsx_vsrln_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrln_h_w(_1, _2);} +-v4i32 __lsx_vsrln_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrln_w_d(_1, _2);} +-v16u8 __lsx_vssrln_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrln_bu_h(_1, _2);} +-v8u16 __lsx_vssrln_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrln_hu_w(_1, _2);} +-v4u32 __lsx_vssrln_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrln_wu_d(_1, _2);} +-v16i8 __lsx_vsrlrn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlrn_b_h(_1, _2);} +-v8i16 __lsx_vsrlrn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlrn_h_w(_1, _2);} +-v4i32 __lsx_vsrlrn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlrn_w_d(_1, _2);} +-v16u8 __lsx_vssrlrn_bu_h(v8u16 _1, v8u16 _2){return __builtin_lsx_vssrlrn_bu_h(_1, _2);} +-v8u16 __lsx_vssrlrn_hu_w(v4u32 _1, v4u32 _2){return __builtin_lsx_vssrlrn_hu_w(_1, _2);} +-v4u32 __lsx_vssrlrn_wu_d(v2u64 _1, v2u64 _2){return __builtin_lsx_vssrlrn_wu_d(_1, _2);} +-v16i8 __lsx_vfrstpi_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vfrstpi_b(_1, _2, 1);} +-v8i16 __lsx_vfrstpi_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vfrstpi_h(_1, _2, 1);} +-v16i8 __lsx_vfrstp_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vfrstp_b(_1, _2, _3);} +-v8i16 __lsx_vfrstp_h(v8i16 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vfrstp_h(_1, _2, _3);} +-v2i64 __lsx_vshuf4i_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vshuf4i_d(_1, _2, 1);} +-v16i8 __lsx_vbsrl_v(v16i8 _1){return __builtin_lsx_vbsrl_v(_1, 1);} +-v16i8 __lsx_vbsll_v(v16i8 _1){return __builtin_lsx_vbsll_v(_1, 1);} +-v16i8 __lsx_vextrins_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vextrins_b(_1, _2, 1);} +-v8i16 __lsx_vextrins_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vextrins_h(_1, _2, 1);} +-v4i32 __lsx_vextrins_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vextrins_w(_1, _2, 1);} +-v2i64 __lsx_vextrins_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vextrins_d(_1, _2, 1);} +-v16i8 __lsx_vmskltz_b(v16i8 _1){return __builtin_lsx_vmskltz_b(_1);} +-v8i16 __lsx_vmskltz_h(v8i16 _1){return __builtin_lsx_vmskltz_h(_1);} +-v4i32 __lsx_vmskltz_w(v4i32 _1){return __builtin_lsx_vmskltz_w(_1);} +-v2i64 __lsx_vmskltz_d(v2i64 _1){return __builtin_lsx_vmskltz_d(_1);} +-v16i8 __lsx_vsigncov_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsigncov_b(_1, _2);} +-v8i16 __lsx_vsigncov_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsigncov_h(_1, _2);} +-v4i32 __lsx_vsigncov_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsigncov_w(_1, _2);} +-v2i64 __lsx_vsigncov_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsigncov_d(_1, _2);} +-v4f32 __lsx_vfmadd_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfmadd_s(_1, _2, _3);} +-v2f64 __lsx_vfmadd_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfmadd_d(_1, _2, _3);} +-v4f32 __lsx_vfmsub_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfmsub_s(_1, _2, _3);} +-v2f64 __lsx_vfmsub_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfmsub_d(_1, _2, _3);} +-v4f32 __lsx_vfnmadd_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfnmadd_s(_1, _2, _3);} +-v2f64 __lsx_vfnmadd_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfnmadd_d(_1, _2, _3);} +-v4f32 
__lsx_vfnmsub_s(v4f32 _1, v4f32 _2, v4f32 _3){return __builtin_lsx_vfnmsub_s(_1, _2, _3);} +-v2f64 __lsx_vfnmsub_d(v2f64 _1, v2f64 _2, v2f64 _3){return __builtin_lsx_vfnmsub_d(_1, _2, _3);} +-v4i32 __lsx_vftintrne_w_s(v4f32 _1){return __builtin_lsx_vftintrne_w_s(_1);} +-v2i64 __lsx_vftintrne_l_d(v2f64 _1){return __builtin_lsx_vftintrne_l_d(_1);} +-v4i32 __lsx_vftintrp_w_s(v4f32 _1){return __builtin_lsx_vftintrp_w_s(_1);} +-v2i64 __lsx_vftintrp_l_d(v2f64 _1){return __builtin_lsx_vftintrp_l_d(_1);} +-v4i32 __lsx_vftintrm_w_s(v4f32 _1){return __builtin_lsx_vftintrm_w_s(_1);} +-v2i64 __lsx_vftintrm_l_d(v2f64 _1){return __builtin_lsx_vftintrm_l_d(_1);} +-v4i32 __lsx_vftint_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftint_w_d(_1, _2);} +-v4f32 __lsx_vffint_s_l(v2i64 _1, v2i64 _2){return __builtin_lsx_vffint_s_l(_1, _2);} +-v4i32 __lsx_vftintrz_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrz_w_d(_1, _2);} +-v4i32 __lsx_vftintrp_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrp_w_d(_1, _2);} +-v4i32 __lsx_vftintrm_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrm_w_d(_1, _2);} +-v4i32 __lsx_vftintrne_w_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vftintrne_w_d(_1, _2);} +-v2i64 __lsx_vftintl_l_s(v4f32 _1){return __builtin_lsx_vftintl_l_s(_1);} +-v2i64 __lsx_vftinth_l_s(v4f32 _1){return __builtin_lsx_vftinth_l_s(_1);} +-v2f64 __lsx_vffinth_d_w(v4i32 _1){return __builtin_lsx_vffinth_d_w(_1);} +-v2f64 __lsx_vffintl_d_w(v4i32 _1){return __builtin_lsx_vffintl_d_w(_1);} +-v2i64 __lsx_vftintrzl_l_s(v4f32 _1){return __builtin_lsx_vftintrzl_l_s(_1);} +-v2i64 __lsx_vftintrzh_l_s(v4f32 _1){return __builtin_lsx_vftintrzh_l_s(_1);} +-v2i64 __lsx_vftintrpl_l_s(v4f32 _1){return __builtin_lsx_vftintrpl_l_s(_1);} +-v2i64 __lsx_vftintrph_l_s(v4f32 _1){return __builtin_lsx_vftintrph_l_s(_1);} +-v2i64 __lsx_vftintrml_l_s(v4f32 _1){return __builtin_lsx_vftintrml_l_s(_1);} +-v2i64 __lsx_vftintrmh_l_s(v4f32 _1){return __builtin_lsx_vftintrmh_l_s(_1);} +-v2i64 __lsx_vftintrnel_l_s(v4f32 _1){return __builtin_lsx_vftintrnel_l_s(_1);} +-v2i64 __lsx_vftintrneh_l_s(v4f32 _1){return __builtin_lsx_vftintrneh_l_s(_1);} +-v4i32 __lsx_vfrintrne_s(v4f32 _1){return __builtin_lsx_vfrintrne_s(_1);} +-v2i64 __lsx_vfrintrne_d(v2f64 _1){return __builtin_lsx_vfrintrne_d(_1);} +-v4i32 __lsx_vfrintrz_s(v4f32 _1){return __builtin_lsx_vfrintrz_s(_1);} +-v2i64 __lsx_vfrintrz_d(v2f64 _1){return __builtin_lsx_vfrintrz_d(_1);} +-v4i32 __lsx_vfrintrp_s(v4f32 _1){return __builtin_lsx_vfrintrp_s(_1);} +-v2i64 __lsx_vfrintrp_d(v2f64 _1){return __builtin_lsx_vfrintrp_d(_1);} +-v4i32 __lsx_vfrintrm_s(v4f32 _1){return __builtin_lsx_vfrintrm_s(_1);} +-v2i64 __lsx_vfrintrm_d(v2f64 _1){return __builtin_lsx_vfrintrm_d(_1);} +-void __lsx_vstelm_b(v16i8 _1, void * _2){return __builtin_lsx_vstelm_b(_1, _2, 1, 1);} +-void __lsx_vstelm_h(v8i16 _1, void * _2){return __builtin_lsx_vstelm_h(_1, _2, 2, 1);} +-void __lsx_vstelm_w(v4i32 _1, void * _2){return __builtin_lsx_vstelm_w(_1, _2, 4, 1);} +-void __lsx_vstelm_d(v2i64 _1, void * _2){return __builtin_lsx_vstelm_d(_1, _2, 8, 1);} +-v2i64 __lsx_vaddwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vaddwev_d_w(_1, _2);} +-v4i32 __lsx_vaddwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vaddwev_w_h(_1, _2);} +-v8i16 __lsx_vaddwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vaddwev_h_b(_1, _2);} +-v2i64 __lsx_vaddwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vaddwod_d_w(_1, _2);} +-v4i32 __lsx_vaddwod_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vaddwod_w_h(_1, _2);} +-v8i16 __lsx_vaddwod_h_b(v16i8 
_1, v16i8 _2){return __builtin_lsx_vaddwod_h_b(_1, _2);} +-v2i64 __lsx_vaddwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vaddwev_d_wu(_1, _2);} +-v4i32 __lsx_vaddwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vaddwev_w_hu(_1, _2);} +-v8i16 __lsx_vaddwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vaddwev_h_bu(_1, _2);} +-v2i64 __lsx_vaddwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vaddwod_d_wu(_1, _2);} +-v4i32 __lsx_vaddwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vaddwod_w_hu(_1, _2);} +-v8i16 __lsx_vaddwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vaddwod_h_bu(_1, _2);} +-v2i64 __lsx_vaddwev_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vaddwev_d_wu_w(_1, _2);} +-v4i32 __lsx_vaddwev_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vaddwev_w_hu_h(_1, _2);} +-v8i16 __lsx_vaddwev_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vaddwev_h_bu_b(_1, _2);} +-v2i64 __lsx_vaddwod_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vaddwod_d_wu_w(_1, _2);} +-v4i32 __lsx_vaddwod_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vaddwod_w_hu_h(_1, _2);} +-v8i16 __lsx_vaddwod_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vaddwod_h_bu_b(_1, _2);} +-v2i64 __lsx_vsubwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsubwev_d_w(_1, _2);} +-v4i32 __lsx_vsubwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsubwev_w_h(_1, _2);} +-v8i16 __lsx_vsubwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsubwev_h_b(_1, _2);} +-v2i64 __lsx_vsubwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vsubwod_d_w(_1, _2);} +-v4i32 __lsx_vsubwod_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vsubwod_w_h(_1, _2);} +-v8i16 __lsx_vsubwod_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vsubwod_h_b(_1, _2);} +-v2i64 __lsx_vsubwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsubwev_d_wu(_1, _2);} +-v4i32 __lsx_vsubwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsubwev_w_hu(_1, _2);} +-v8i16 __lsx_vsubwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsubwev_h_bu(_1, _2);} +-v2i64 __lsx_vsubwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vsubwod_d_wu(_1, _2);} +-v4i32 __lsx_vsubwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vsubwod_w_hu(_1, _2);} +-v8i16 __lsx_vsubwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vsubwod_h_bu(_1, _2);} +-v2i64 __lsx_vaddwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vaddwev_q_d(_1, _2);} +-v2i64 __lsx_vaddwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vaddwod_q_d(_1, _2);} +-v2i64 __lsx_vaddwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vaddwev_q_du(_1, _2);} +-v2i64 __lsx_vaddwod_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vaddwod_q_du(_1, _2);} +-v2i64 __lsx_vsubwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsubwev_q_d(_1, _2);} +-v2i64 __lsx_vsubwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vsubwod_q_d(_1, _2);} +-v2i64 __lsx_vsubwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsubwev_q_du(_1, _2);} +-v2i64 __lsx_vsubwod_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vsubwod_q_du(_1, _2);} +-v2i64 __lsx_vaddwev_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vaddwev_q_du_d(_1, _2);} +-v2i64 __lsx_vaddwod_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vaddwod_q_du_d(_1, _2);} +-v2i64 __lsx_vmulwev_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmulwev_d_w(_1, _2);} +-v4i32 __lsx_vmulwev_w_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vmulwev_w_h(_1, _2);} +-v8i16 __lsx_vmulwev_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmulwev_h_b(_1, _2);} +-v2i64 __lsx_vmulwod_d_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vmulwod_d_w(_1, _2);} +-v4i32 __lsx_vmulwod_w_h(v8i16 _1, 
v8i16 _2){return __builtin_lsx_vmulwod_w_h(_1, _2);} +-v8i16 __lsx_vmulwod_h_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vmulwod_h_b(_1, _2);} +-v2i64 __lsx_vmulwev_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmulwev_d_wu(_1, _2);} +-v4i32 __lsx_vmulwev_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmulwev_w_hu(_1, _2);} +-v8i16 __lsx_vmulwev_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmulwev_h_bu(_1, _2);} +-v2i64 __lsx_vmulwod_d_wu(v4u32 _1, v4u32 _2){return __builtin_lsx_vmulwod_d_wu(_1, _2);} +-v4i32 __lsx_vmulwod_w_hu(v8u16 _1, v8u16 _2){return __builtin_lsx_vmulwod_w_hu(_1, _2);} +-v8i16 __lsx_vmulwod_h_bu(v16u8 _1, v16u8 _2){return __builtin_lsx_vmulwod_h_bu(_1, _2);} +-v2i64 __lsx_vmulwev_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vmulwev_d_wu_w(_1, _2);} +-v4i32 __lsx_vmulwev_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vmulwev_w_hu_h(_1, _2);} +-v8i16 __lsx_vmulwev_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vmulwev_h_bu_b(_1, _2);} +-v2i64 __lsx_vmulwod_d_wu_w(v4u32 _1, v4i32 _2){return __builtin_lsx_vmulwod_d_wu_w(_1, _2);} +-v4i32 __lsx_vmulwod_w_hu_h(v8u16 _1, v8i16 _2){return __builtin_lsx_vmulwod_w_hu_h(_1, _2);} +-v8i16 __lsx_vmulwod_h_bu_b(v16u8 _1, v16i8 _2){return __builtin_lsx_vmulwod_h_bu_b(_1, _2);} +-v2i64 __lsx_vmulwev_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmulwev_q_d(_1, _2);} +-v2i64 __lsx_vmulwod_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vmulwod_q_d(_1, _2);} +-v2i64 __lsx_vmulwev_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmulwev_q_du(_1, _2);} +-v2i64 __lsx_vmulwod_q_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vmulwod_q_du(_1, _2);} +-v2i64 __lsx_vmulwev_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vmulwev_q_du_d(_1, _2);} +-v2i64 __lsx_vmulwod_q_du_d(v2u64 _1, v2i64 _2){return __builtin_lsx_vmulwod_q_du_d(_1, _2);} +-v2i64 __lsx_vhaddw_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vhaddw_q_d(_1, _2);} +-v2u64 __lsx_vhaddw_qu_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vhaddw_qu_du(_1, _2);} +-v2i64 __lsx_vhsubw_q_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vhsubw_q_d(_1, _2);} +-v2u64 __lsx_vhsubw_qu_du(v2u64 _1, v2u64 _2){return __builtin_lsx_vhsubw_qu_du(_1, _2);} +-v2i64 __lsx_vmaddwev_d_w(v2i64 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmaddwev_d_w(_1, _2, _3);} +-v4i32 __lsx_vmaddwev_w_h(v4i32 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmaddwev_w_h(_1, _2, _3);} +-v8i16 __lsx_vmaddwev_h_b(v8i16 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmaddwev_h_b(_1, _2, _3);} +-v2u64 __lsx_vmaddwev_d_wu(v2u64 _1, v4u32 _2, v4u32 _3){return __builtin_lsx_vmaddwev_d_wu(_1, _2, _3);} +-v4u32 __lsx_vmaddwev_w_hu(v4u32 _1, v8u16 _2, v8u16 _3){return __builtin_lsx_vmaddwev_w_hu(_1, _2, _3);} +-v8u16 __lsx_vmaddwev_h_bu(v8u16 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vmaddwev_h_bu(_1, _2, _3);} +-v2i64 __lsx_vmaddwod_d_w(v2i64 _1, v4i32 _2, v4i32 _3){return __builtin_lsx_vmaddwod_d_w(_1, _2, _3);} +-v4i32 __lsx_vmaddwod_w_h(v4i32 _1, v8i16 _2, v8i16 _3){return __builtin_lsx_vmaddwod_w_h(_1, _2, _3);} +-v8i16 __lsx_vmaddwod_h_b(v8i16 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vmaddwod_h_b(_1, _2, _3);} +-v2u64 __lsx_vmaddwod_d_wu(v2u64 _1, v4u32 _2, v4u32 _3){return __builtin_lsx_vmaddwod_d_wu(_1, _2, _3);} +-v4u32 __lsx_vmaddwod_w_hu(v4u32 _1, v8u16 _2, v8u16 _3){return __builtin_lsx_vmaddwod_w_hu(_1, _2, _3);} +-v8u16 __lsx_vmaddwod_h_bu(v8u16 _1, v16u8 _2, v16u8 _3){return __builtin_lsx_vmaddwod_h_bu(_1, _2, _3);} +-v2i64 __lsx_vmaddwev_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3){return __builtin_lsx_vmaddwev_d_wu_w(_1, _2, _3);} +-v4i32 
__lsx_vmaddwev_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3){return __builtin_lsx_vmaddwev_w_hu_h(_1, _2, _3);} +-v8i16 __lsx_vmaddwev_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3){return __builtin_lsx_vmaddwev_h_bu_b(_1, _2, _3);} +-v2i64 __lsx_vmaddwod_d_wu_w(v2i64 _1, v4u32 _2, v4i32 _3){return __builtin_lsx_vmaddwod_d_wu_w(_1, _2, _3);} +-v4i32 __lsx_vmaddwod_w_hu_h(v4i32 _1, v8u16 _2, v8i16 _3){return __builtin_lsx_vmaddwod_w_hu_h(_1, _2, _3);} +-v8i16 __lsx_vmaddwod_h_bu_b(v8i16 _1, v16u8 _2, v16i8 _3){return __builtin_lsx_vmaddwod_h_bu_b(_1, _2, _3);} +-v2i64 __lsx_vmaddwev_q_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmaddwev_q_d(_1, _2, _3);} +-v2i64 __lsx_vmaddwod_q_d(v2i64 _1, v2i64 _2, v2i64 _3){return __builtin_lsx_vmaddwod_q_d(_1, _2, _3);} +-v2u64 __lsx_vmaddwev_q_du(v2u64 _1, v2u64 _2, v2u64 _3){return __builtin_lsx_vmaddwev_q_du(_1, _2, _3);} +-v2u64 __lsx_vmaddwod_q_du(v2u64 _1, v2u64 _2, v2u64 _3){return __builtin_lsx_vmaddwod_q_du(_1, _2, _3);} +-v2i64 __lsx_vmaddwev_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3){return __builtin_lsx_vmaddwev_q_du_d(_1, _2, _3);} +-v2i64 __lsx_vmaddwod_q_du_d(v2i64 _1, v2u64 _2, v2i64 _3){return __builtin_lsx_vmaddwod_q_du_d(_1, _2, _3);} +-v16i8 __lsx_vrotr_b(v16i8 _1, v16i8 _2){return __builtin_lsx_vrotr_b(_1, _2);} +-v8i16 __lsx_vrotr_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vrotr_h(_1, _2);} +-v4i32 __lsx_vrotr_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vrotr_w(_1, _2);} +-v2i64 __lsx_vrotr_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vrotr_d(_1, _2);} +-v2i64 __lsx_vadd_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vadd_q(_1, _2);} +-v2i64 __lsx_vsub_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsub_q(_1, _2);} +-v16i8 __lsx_vldrepl_b(void * _1){return __builtin_lsx_vldrepl_b(_1, 1);} +-v8i16 __lsx_vldrepl_h(void * _1){return __builtin_lsx_vldrepl_h(_1, 2);} +-v4i32 __lsx_vldrepl_w(void * _1){return __builtin_lsx_vldrepl_w(_1, 4);} +-v2i64 __lsx_vldrepl_d(void * _1){return __builtin_lsx_vldrepl_d(_1, 8);} +-v16i8 __lsx_vmskgez_b(v16i8 _1){return __builtin_lsx_vmskgez_b(_1);} +-v16i8 __lsx_vmsknz_b(v16i8 _1){return __builtin_lsx_vmsknz_b(_1);} +-v8i16 __lsx_vexth_h_b(v16i8 _1){return __builtin_lsx_vexth_h_b(_1);} +-v4i32 __lsx_vexth_w_h(v8i16 _1){return __builtin_lsx_vexth_w_h(_1);} +-v2i64 __lsx_vexth_d_w(v4i32 _1){return __builtin_lsx_vexth_d_w(_1);} +-v2i64 __lsx_vexth_q_d(v2i64 _1){return __builtin_lsx_vexth_q_d(_1);} +-v8u16 __lsx_vexth_hu_bu(v16u8 _1){return __builtin_lsx_vexth_hu_bu(_1);} +-v4u32 __lsx_vexth_wu_hu(v8u16 _1){return __builtin_lsx_vexth_wu_hu(_1);} +-v2u64 __lsx_vexth_du_wu(v4u32 _1){return __builtin_lsx_vexth_du_wu(_1);} +-v2u64 __lsx_vexth_qu_du(v2u64 _1){return __builtin_lsx_vexth_qu_du(_1);} +-v16i8 __lsx_vrotri_b(v16i8 _1){return __builtin_lsx_vrotri_b(_1, 1);} +-v8i16 __lsx_vrotri_h(v8i16 _1){return __builtin_lsx_vrotri_h(_1, 1);} +-v4i32 __lsx_vrotri_w(v4i32 _1){return __builtin_lsx_vrotri_w(_1, 1);} +-v2i64 __lsx_vrotri_d(v2i64 _1){return __builtin_lsx_vrotri_d(_1, 1);} +-v2i64 __lsx_vextl_q_d(v2i64 _1){return __builtin_lsx_vextl_q_d(_1);} +-v16i8 __lsx_vsrlni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlni_b_h(_1, _2, 1);} +-v8i16 __lsx_vsrlni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrlni_h_w(_1, _2, 1);} +-v4i32 __lsx_vsrlni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlni_w_d(_1, _2, 1);} +-v2i64 __lsx_vsrlni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlni_d_q(_1, _2, 1);} +-v16i8 __lsx_vsrlrni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrlrni_b_h(_1, _2, 1);} +-v8i16 __lsx_vsrlrni_h_w(v8i16 _1, v8i16 
_2){return __builtin_lsx_vsrlrni_h_w(_1, _2, 1);} +-v4i32 __lsx_vsrlrni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrlrni_w_d(_1, _2, 1);} +-v2i64 __lsx_vsrlrni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrlrni_d_q(_1, _2, 1);} +-v16i8 __lsx_vssrlni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrlni_b_h(_1, _2, 1);} +-v8i16 __lsx_vssrlni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlni_h_w(_1, _2, 1);} +-v4i32 __lsx_vssrlni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlni_w_d(_1, _2, 1);} +-v2i64 __lsx_vssrlni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlni_d_q(_1, _2, 1);} +-v16u8 __lsx_vssrlni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrlni_bu_h(_1, _2, 1);} +-v8u16 __lsx_vssrlni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrlni_hu_w(_1, _2, 1);} +-v4u32 __lsx_vssrlni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrlni_wu_d(_1, _2, 1);} +-v2u64 __lsx_vssrlni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrlni_du_q(_1, _2, 1);} +-v16i8 __lsx_vssrlrni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrlrni_b_h(_1, _2, 1);} +-v8i16 __lsx_vssrlrni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlrni_h_w(_1, _2, 1);} +-v4i32 __lsx_vssrlrni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlrni_w_d(_1, _2, 1);} +-v2i64 __lsx_vssrlrni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlrni_d_q(_1, _2, 1);} +-v16u8 __lsx_vssrlrni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrlrni_bu_h(_1, _2, 1);} +-v8u16 __lsx_vssrlrni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrlrni_hu_w(_1, _2, 1);} +-v4u32 __lsx_vssrlrni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrlrni_wu_d(_1, _2, 1);} +-v2u64 __lsx_vssrlrni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrlrni_du_q(_1, _2, 1);} +-v16i8 __lsx_vsrani_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrani_b_h(_1, _2, 1);} +-v8i16 __lsx_vsrani_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrani_h_w(_1, _2, 1);} +-v4i32 __lsx_vsrani_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrani_w_d(_1, _2, 1);} +-v2i64 __lsx_vsrani_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrani_d_q(_1, _2, 1);} +-v16i8 __lsx_vsrarni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vsrarni_b_h(_1, _2, 1);} +-v8i16 __lsx_vsrarni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vsrarni_h_w(_1, _2, 1);} +-v4i32 __lsx_vsrarni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vsrarni_w_d(_1, _2, 1);} +-v2i64 __lsx_vsrarni_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vsrarni_d_q(_1, _2, 1);} +-v16i8 __lsx_vssrani_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrani_b_h(_1, _2, 1);} +-v8i16 __lsx_vssrani_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrani_h_w(_1, _2, 1);} +-v4i32 __lsx_vssrani_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrani_w_d(_1, _2, 1);} +-v2i64 __lsx_vssrani_d_q(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrani_d_q(_1, _2, 1);} +-v16u8 __lsx_vssrani_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrani_bu_h(_1, _2, 1);} +-v8u16 __lsx_vssrani_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrani_hu_w(_1, _2, 1);} +-v4u32 __lsx_vssrani_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrani_wu_d(_1, _2, 1);} +-v2u64 __lsx_vssrani_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrani_du_q(_1, _2, 1);} +-v16i8 __lsx_vssrarni_b_h(v16i8 _1, v16i8 _2){return __builtin_lsx_vssrarni_b_h(_1, _2, 1);} +-v8i16 __lsx_vssrarni_h_w(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrarni_h_w(_1, _2, 1);} +-v4i32 __lsx_vssrarni_w_d(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrarni_w_d(_1, _2, 1);} +-v2i64 __lsx_vssrarni_d_q(v2i64 _1, v2i64 _2){return 
__builtin_lsx_vssrarni_d_q(_1, _2, 1);} +-v16u8 __lsx_vssrarni_bu_h(v16u8 _1, v16i8 _2){return __builtin_lsx_vssrarni_bu_h(_1, _2, 1);} +-v8u16 __lsx_vssrarni_hu_w(v8u16 _1, v8i16 _2){return __builtin_lsx_vssrarni_hu_w(_1, _2, 1);} +-v4u32 __lsx_vssrarni_wu_d(v4u32 _1, v4i32 _2){return __builtin_lsx_vssrarni_wu_d(_1, _2, 1);} +-v2u64 __lsx_vssrarni_du_q(v2u64 _1, v2i64 _2){return __builtin_lsx_vssrarni_du_q(_1, _2, 1);} +-v4i32 __lsx_vpermi_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vpermi_w(_1, _2, 1);} +-v16i8 __lsx_vld(void * _1){return __builtin_lsx_vld(_1, 1);} +-void __lsx_vst(v16i8 _1, void * _2){return __builtin_lsx_vst(_1, _2, 1);} +-v16i8 __lsx_vssrlrn_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrlrn_b_h(_1, _2);} +-v8i16 __lsx_vssrlrn_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrlrn_h_w(_1, _2);} +-v4i32 __lsx_vssrlrn_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrlrn_w_d(_1, _2);} +-v16i8 __lsx_vssrln_b_h(v8i16 _1, v8i16 _2){return __builtin_lsx_vssrln_b_h(_1, _2);} +-v8i16 __lsx_vssrln_h_w(v4i32 _1, v4i32 _2){return __builtin_lsx_vssrln_h_w(_1, _2);} +-v4i32 __lsx_vssrln_w_d(v2i64 _1, v2i64 _2){return __builtin_lsx_vssrln_w_d(_1, _2);} +-v16i8 __lsx_vorn_v(v16i8 _1, v16i8 _2){return __builtin_lsx_vorn_v(_1, _2);} +-v2i64 __lsx_vldi(){return __builtin_lsx_vldi(1);} +-v16i8 __lsx_vshuf_b(v16i8 _1, v16i8 _2, v16i8 _3){return __builtin_lsx_vshuf_b(_1, _2, _3);} +-v16i8 __lsx_vldx(void * _1){return __builtin_lsx_vldx(_1, 1);} +-void __lsx_vstx(v16i8 _1, void * _2){return __builtin_lsx_vstx(_1, _2, 1);} +-v2u64 __lsx_vextl_qu_du(v2u64 _1){return __builtin_lsx_vextl_qu_du(_1);} +-int __lsx_bnz_b(v16u8 _1){return __builtin_lsx_bnz_b(_1);} +-int __lsx_bnz_d(v2u64 _1){return __builtin_lsx_bnz_d(_1);} +-int __lsx_bnz_h(v8u16 _1){return __builtin_lsx_bnz_h(_1);} +-int __lsx_bnz_v(v16u8 _1){return __builtin_lsx_bnz_v(_1);} +-int __lsx_bnz_w(v4u32 _1){return __builtin_lsx_bnz_w(_1);} +-int __lsx_bz_b(v16u8 _1){return __builtin_lsx_bz_b(_1);} +-int __lsx_bz_d(v2u64 _1){return __builtin_lsx_bz_d(_1);} +-int __lsx_bz_h(v8u16 _1){return __builtin_lsx_bz_h(_1);} +-int __lsx_bz_v(v16u8 _1){return __builtin_lsx_bz_v(_1);} +-int __lsx_bz_w(v4u32 _1){return __builtin_lsx_bz_w(_1);} +-v2i64 __lsx_vfcmp_caf_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_caf_d(_1, _2);} +-v4i32 __lsx_vfcmp_caf_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_caf_s(_1, _2);} +-v2i64 __lsx_vfcmp_ceq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_ceq_d(_1, _2);} +-v4i32 __lsx_vfcmp_ceq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_ceq_s(_1, _2);} +-v2i64 __lsx_vfcmp_cle_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cle_d(_1, _2);} +-v4i32 __lsx_vfcmp_cle_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cle_s(_1, _2);} +-v2i64 __lsx_vfcmp_clt_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_clt_d(_1, _2);} +-v4i32 __lsx_vfcmp_clt_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_clt_s(_1, _2);} +-v2i64 __lsx_vfcmp_cne_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cne_d(_1, _2);} +-v4i32 __lsx_vfcmp_cne_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cne_s(_1, _2);} +-v2i64 __lsx_vfcmp_cor_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cor_d(_1, _2);} +-v4i32 __lsx_vfcmp_cor_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cor_s(_1, _2);} +-v2i64 __lsx_vfcmp_cueq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cueq_d(_1, _2);} +-v4i32 __lsx_vfcmp_cueq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cueq_s(_1, _2);} +-v2i64 __lsx_vfcmp_cule_d(v2f64 _1, v2f64 _2){return 
__builtin_lsx_vfcmp_cule_d(_1, _2);} +-v4i32 __lsx_vfcmp_cule_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cule_s(_1, _2);} +-v2i64 __lsx_vfcmp_cult_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cult_d(_1, _2);} +-v4i32 __lsx_vfcmp_cult_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cult_s(_1, _2);} +-v2i64 __lsx_vfcmp_cun_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cun_d(_1, _2);} +-v2i64 __lsx_vfcmp_cune_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_cune_d(_1, _2);} +-v4i32 __lsx_vfcmp_cune_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cune_s(_1, _2);} +-v4i32 __lsx_vfcmp_cun_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_cun_s(_1, _2);} +-v2i64 __lsx_vfcmp_saf_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_saf_d(_1, _2);} +-v4i32 __lsx_vfcmp_saf_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_saf_s(_1, _2);} +-v2i64 __lsx_vfcmp_seq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_seq_d(_1, _2);} +-v4i32 __lsx_vfcmp_seq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_seq_s(_1, _2);} +-v2i64 __lsx_vfcmp_sle_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sle_d(_1, _2);} +-v4i32 __lsx_vfcmp_sle_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sle_s(_1, _2);} +-v2i64 __lsx_vfcmp_slt_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_slt_d(_1, _2);} +-v4i32 __lsx_vfcmp_slt_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_slt_s(_1, _2);} +-v2i64 __lsx_vfcmp_sne_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sne_d(_1, _2);} +-v4i32 __lsx_vfcmp_sne_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sne_s(_1, _2);} +-v2i64 __lsx_vfcmp_sor_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sor_d(_1, _2);} +-v4i32 __lsx_vfcmp_sor_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sor_s(_1, _2);} +-v2i64 __lsx_vfcmp_sueq_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sueq_d(_1, _2);} +-v4i32 __lsx_vfcmp_sueq_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sueq_s(_1, _2);} +-v2i64 __lsx_vfcmp_sule_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sule_d(_1, _2);} +-v4i32 __lsx_vfcmp_sule_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sule_s(_1, _2);} +-v2i64 __lsx_vfcmp_sult_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sult_d(_1, _2);} +-v4i32 __lsx_vfcmp_sult_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sult_s(_1, _2);} +-v2i64 __lsx_vfcmp_sun_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sun_d(_1, _2);} +-v2i64 __lsx_vfcmp_sune_d(v2f64 _1, v2f64 _2){return __builtin_lsx_vfcmp_sune_d(_1, _2);} +-v4i32 __lsx_vfcmp_sune_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sune_s(_1, _2);} +-v4i32 __lsx_vfcmp_sun_s(v4f32 _1, v4f32 _2){return __builtin_lsx_vfcmp_sun_s(_1, _2);} +-v16i8 __lsx_vrepli_b(){return __builtin_lsx_vrepli_b(1);} +-v2i64 __lsx_vrepli_d(){return __builtin_lsx_vrepli_d(1);} +-v8i16 __lsx_vrepli_h(){return __builtin_lsx_vrepli_h(1);} +-v4i32 __lsx_vrepli_w(){return __builtin_lsx_vrepli_w(1);} ++v16i8 ++__lsx_vsll_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsll_b (_1, _2); ++} ++v8i16 ++__lsx_vsll_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsll_h (_1, _2); ++} ++v4i32 ++__lsx_vsll_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsll_w (_1, _2); ++} ++v2i64 ++__lsx_vsll_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsll_d (_1, _2); ++} ++v16i8 ++__lsx_vslli_b (v16i8 _1) ++{ ++ return __builtin_lsx_vslli_b (_1, 1); ++} ++v8i16 ++__lsx_vslli_h (v8i16 _1) ++{ ++ return __builtin_lsx_vslli_h (_1, 1); ++} ++v4i32 ++__lsx_vslli_w (v4i32 _1) ++{ ++ return __builtin_lsx_vslli_w (_1, 1); ++} ++v2i64 ++__lsx_vslli_d (v2i64 _1) ++{ ++ 
return __builtin_lsx_vslli_d (_1, 1); ++} ++v16i8 ++__lsx_vsra_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsra_b (_1, _2); ++} ++v8i16 ++__lsx_vsra_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsra_h (_1, _2); ++} ++v4i32 ++__lsx_vsra_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsra_w (_1, _2); ++} ++v2i64 ++__lsx_vsra_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsra_d (_1, _2); ++} ++v16i8 ++__lsx_vsrai_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsrai_b (_1, 1); ++} ++v8i16 ++__lsx_vsrai_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsrai_h (_1, 1); ++} ++v4i32 ++__lsx_vsrai_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsrai_w (_1, 1); ++} ++v2i64 ++__lsx_vsrai_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsrai_d (_1, 1); ++} ++v16i8 ++__lsx_vsrar_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrar_b (_1, _2); ++} ++v8i16 ++__lsx_vsrar_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrar_h (_1, _2); ++} ++v4i32 ++__lsx_vsrar_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrar_w (_1, _2); ++} ++v2i64 ++__lsx_vsrar_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrar_d (_1, _2); ++} ++v16i8 ++__lsx_vsrari_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsrari_b (_1, 1); ++} ++v8i16 ++__lsx_vsrari_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsrari_h (_1, 1); ++} ++v4i32 ++__lsx_vsrari_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsrari_w (_1, 1); ++} ++v2i64 ++__lsx_vsrari_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsrari_d (_1, 1); ++} ++v16i8 ++__lsx_vsrl_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrl_b (_1, _2); ++} ++v8i16 ++__lsx_vsrl_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrl_h (_1, _2); ++} ++v4i32 ++__lsx_vsrl_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrl_w (_1, _2); ++} ++v2i64 ++__lsx_vsrl_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrl_d (_1, _2); ++} ++v16i8 ++__lsx_vsrli_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsrli_b (_1, 1); ++} ++v8i16 ++__lsx_vsrli_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsrli_h (_1, 1); ++} ++v4i32 ++__lsx_vsrli_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsrli_w (_1, 1); ++} ++v2i64 ++__lsx_vsrli_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsrli_d (_1, 1); ++} ++v16i8 ++__lsx_vsrlr_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrlr_b (_1, _2); ++} ++v8i16 ++__lsx_vsrlr_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrlr_h (_1, _2); ++} ++v4i32 ++__lsx_vsrlr_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrlr_w (_1, _2); ++} ++v2i64 ++__lsx_vsrlr_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrlr_d (_1, _2); ++} ++v16i8 ++__lsx_vsrlri_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsrlri_b (_1, 1); ++} ++v8i16 ++__lsx_vsrlri_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsrlri_h (_1, 1); ++} ++v4i32 ++__lsx_vsrlri_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsrlri_w (_1, 1); ++} ++v2i64 ++__lsx_vsrlri_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsrlri_d (_1, 1); ++} ++v16u8 ++__lsx_vbitclr_b (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vbitclr_b (_1, _2); ++} ++v8u16 ++__lsx_vbitclr_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vbitclr_h (_1, _2); ++} ++v4u32 ++__lsx_vbitclr_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vbitclr_w (_1, _2); ++} ++v2u64 ++__lsx_vbitclr_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vbitclr_d (_1, _2); ++} ++v16u8 ++__lsx_vbitclri_b (v16u8 _1) ++{ ++ return __builtin_lsx_vbitclri_b (_1, 1); ++} ++v8u16 ++__lsx_vbitclri_h (v8u16 _1) ++{ ++ return __builtin_lsx_vbitclri_h (_1, 1); ++} ++v4u32 ++__lsx_vbitclri_w (v4u32 _1) ++{ ++ return 
__builtin_lsx_vbitclri_w (_1, 1); ++} ++v2u64 ++__lsx_vbitclri_d (v2u64 _1) ++{ ++ return __builtin_lsx_vbitclri_d (_1, 1); ++} ++v16u8 ++__lsx_vbitset_b (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vbitset_b (_1, _2); ++} ++v8u16 ++__lsx_vbitset_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vbitset_h (_1, _2); ++} ++v4u32 ++__lsx_vbitset_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vbitset_w (_1, _2); ++} ++v2u64 ++__lsx_vbitset_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vbitset_d (_1, _2); ++} ++v16u8 ++__lsx_vbitseti_b (v16u8 _1) ++{ ++ return __builtin_lsx_vbitseti_b (_1, 1); ++} ++v8u16 ++__lsx_vbitseti_h (v8u16 _1) ++{ ++ return __builtin_lsx_vbitseti_h (_1, 1); ++} ++v4u32 ++__lsx_vbitseti_w (v4u32 _1) ++{ ++ return __builtin_lsx_vbitseti_w (_1, 1); ++} ++v2u64 ++__lsx_vbitseti_d (v2u64 _1) ++{ ++ return __builtin_lsx_vbitseti_d (_1, 1); ++} ++v16u8 ++__lsx_vbitrev_b (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vbitrev_b (_1, _2); ++} ++v8u16 ++__lsx_vbitrev_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vbitrev_h (_1, _2); ++} ++v4u32 ++__lsx_vbitrev_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vbitrev_w (_1, _2); ++} ++v2u64 ++__lsx_vbitrev_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vbitrev_d (_1, _2); ++} ++v16u8 ++__lsx_vbitrevi_b (v16u8 _1) ++{ ++ return __builtin_lsx_vbitrevi_b (_1, 1); ++} ++v8u16 ++__lsx_vbitrevi_h (v8u16 _1) ++{ ++ return __builtin_lsx_vbitrevi_h (_1, 1); ++} ++v4u32 ++__lsx_vbitrevi_w (v4u32 _1) ++{ ++ return __builtin_lsx_vbitrevi_w (_1, 1); ++} ++v2u64 ++__lsx_vbitrevi_d (v2u64 _1) ++{ ++ return __builtin_lsx_vbitrevi_d (_1, 1); ++} ++v16i8 ++__lsx_vadd_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vadd_b (_1, _2); ++} ++v8i16 ++__lsx_vadd_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vadd_h (_1, _2); ++} ++v4i32 ++__lsx_vadd_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vadd_w (_1, _2); ++} ++v2i64 ++__lsx_vadd_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vadd_d (_1, _2); ++} ++v16i8 ++__lsx_vaddi_bu (v16i8 _1) ++{ ++ return __builtin_lsx_vaddi_bu (_1, 1); ++} ++v8i16 ++__lsx_vaddi_hu (v8i16 _1) ++{ ++ return __builtin_lsx_vaddi_hu (_1, 1); ++} ++v4i32 ++__lsx_vaddi_wu (v4i32 _1) ++{ ++ return __builtin_lsx_vaddi_wu (_1, 1); ++} ++v2i64 ++__lsx_vaddi_du (v2i64 _1) ++{ ++ return __builtin_lsx_vaddi_du (_1, 1); ++} ++v16i8 ++__lsx_vsub_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsub_b (_1, _2); ++} ++v8i16 ++__lsx_vsub_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsub_h (_1, _2); ++} ++v4i32 ++__lsx_vsub_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsub_w (_1, _2); ++} ++v2i64 ++__lsx_vsub_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsub_d (_1, _2); ++} ++v16i8 ++__lsx_vsubi_bu (v16i8 _1) ++{ ++ return __builtin_lsx_vsubi_bu (_1, 1); ++} ++v8i16 ++__lsx_vsubi_hu (v8i16 _1) ++{ ++ return __builtin_lsx_vsubi_hu (_1, 1); ++} ++v4i32 ++__lsx_vsubi_wu (v4i32 _1) ++{ ++ return __builtin_lsx_vsubi_wu (_1, 1); ++} ++v2i64 ++__lsx_vsubi_du (v2i64 _1) ++{ ++ return __builtin_lsx_vsubi_du (_1, 1); ++} ++v16i8 ++__lsx_vmax_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmax_b (_1, _2); ++} ++v8i16 ++__lsx_vmax_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmax_h (_1, _2); ++} ++v4i32 ++__lsx_vmax_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmax_w (_1, _2); ++} ++v2i64 ++__lsx_vmax_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmax_d (_1, _2); ++} ++v16i8 ++__lsx_vmaxi_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmaxi_b (_1, 1); ++} ++v8i16 ++__lsx_vmaxi_h 
(v8i16 _1) ++{ ++ return __builtin_lsx_vmaxi_h (_1, 1); ++} ++v4i32 ++__lsx_vmaxi_w (v4i32 _1) ++{ ++ return __builtin_lsx_vmaxi_w (_1, 1); ++} ++v2i64 ++__lsx_vmaxi_d (v2i64 _1) ++{ ++ return __builtin_lsx_vmaxi_d (_1, 1); ++} ++v16u8 ++__lsx_vmax_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmax_bu (_1, _2); ++} ++v8u16 ++__lsx_vmax_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmax_hu (_1, _2); ++} ++v4u32 ++__lsx_vmax_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmax_wu (_1, _2); ++} ++v2u64 ++__lsx_vmax_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmax_du (_1, _2); ++} ++v16u8 ++__lsx_vmaxi_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vmaxi_bu (_1, 1); ++} ++v8u16 ++__lsx_vmaxi_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vmaxi_hu (_1, 1); ++} ++v4u32 ++__lsx_vmaxi_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vmaxi_wu (_1, 1); ++} ++v2u64 ++__lsx_vmaxi_du (v2u64 _1) ++{ ++ return __builtin_lsx_vmaxi_du (_1, 1); ++} ++v16i8 ++__lsx_vmin_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmin_b (_1, _2); ++} ++v8i16 ++__lsx_vmin_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmin_h (_1, _2); ++} ++v4i32 ++__lsx_vmin_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmin_w (_1, _2); ++} ++v2i64 ++__lsx_vmin_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmin_d (_1, _2); ++} ++v16i8 ++__lsx_vmini_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmini_b (_1, 1); ++} ++v8i16 ++__lsx_vmini_h (v8i16 _1) ++{ ++ return __builtin_lsx_vmini_h (_1, 1); ++} ++v4i32 ++__lsx_vmini_w (v4i32 _1) ++{ ++ return __builtin_lsx_vmini_w (_1, 1); ++} ++v2i64 ++__lsx_vmini_d (v2i64 _1) ++{ ++ return __builtin_lsx_vmini_d (_1, 1); ++} ++v16u8 ++__lsx_vmin_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmin_bu (_1, _2); ++} ++v8u16 ++__lsx_vmin_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmin_hu (_1, _2); ++} ++v4u32 ++__lsx_vmin_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmin_wu (_1, _2); ++} ++v2u64 ++__lsx_vmin_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmin_du (_1, _2); ++} ++v16u8 ++__lsx_vmini_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vmini_bu (_1, 1); ++} ++v8u16 ++__lsx_vmini_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vmini_hu (_1, 1); ++} ++v4u32 ++__lsx_vmini_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vmini_wu (_1, 1); ++} ++v2u64 ++__lsx_vmini_du (v2u64 _1) ++{ ++ return __builtin_lsx_vmini_du (_1, 1); ++} ++v16i8 ++__lsx_vseq_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vseq_b (_1, _2); ++} ++v8i16 ++__lsx_vseq_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vseq_h (_1, _2); ++} ++v4i32 ++__lsx_vseq_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vseq_w (_1, _2); ++} ++v2i64 ++__lsx_vseq_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vseq_d (_1, _2); ++} ++v16i8 ++__lsx_vseqi_b (v16i8 _1) ++{ ++ return __builtin_lsx_vseqi_b (_1, 1); ++} ++v8i16 ++__lsx_vseqi_h (v8i16 _1) ++{ ++ return __builtin_lsx_vseqi_h (_1, 1); ++} ++v4i32 ++__lsx_vseqi_w (v4i32 _1) ++{ ++ return __builtin_lsx_vseqi_w (_1, 1); ++} ++v2i64 ++__lsx_vseqi_d (v2i64 _1) ++{ ++ return __builtin_lsx_vseqi_d (_1, 1); ++} ++v16i8 ++__lsx_vslti_b (v16i8 _1) ++{ ++ return __builtin_lsx_vslti_b (_1, 1); ++} ++v16i8 ++__lsx_vslt_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vslt_b (_1, _2); ++} ++v8i16 ++__lsx_vslt_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vslt_h (_1, _2); ++} ++v4i32 ++__lsx_vslt_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vslt_w (_1, _2); ++} ++v2i64 ++__lsx_vslt_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vslt_d (_1, _2); ++} 
++v8i16 ++__lsx_vslti_h (v8i16 _1) ++{ ++ return __builtin_lsx_vslti_h (_1, 1); ++} ++v4i32 ++__lsx_vslti_w (v4i32 _1) ++{ ++ return __builtin_lsx_vslti_w (_1, 1); ++} ++v2i64 ++__lsx_vslti_d (v2i64 _1) ++{ ++ return __builtin_lsx_vslti_d (_1, 1); ++} ++v16i8 ++__lsx_vslt_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vslt_bu (_1, _2); ++} ++v8i16 ++__lsx_vslt_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vslt_hu (_1, _2); ++} ++v4i32 ++__lsx_vslt_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vslt_wu (_1, _2); ++} ++v2i64 ++__lsx_vslt_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vslt_du (_1, _2); ++} ++v16i8 ++__lsx_vslti_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vslti_bu (_1, 1); ++} ++v8i16 ++__lsx_vslti_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vslti_hu (_1, 1); ++} ++v4i32 ++__lsx_vslti_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vslti_wu (_1, 1); ++} ++v2i64 ++__lsx_vslti_du (v2u64 _1) ++{ ++ return __builtin_lsx_vslti_du (_1, 1); ++} ++v16i8 ++__lsx_vsle_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsle_b (_1, _2); ++} ++v8i16 ++__lsx_vsle_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsle_h (_1, _2); ++} ++v4i32 ++__lsx_vsle_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsle_w (_1, _2); ++} ++v2i64 ++__lsx_vsle_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsle_d (_1, _2); ++} ++v16i8 ++__lsx_vslei_b (v16i8 _1) ++{ ++ return __builtin_lsx_vslei_b (_1, 1); ++} ++v8i16 ++__lsx_vslei_h (v8i16 _1) ++{ ++ return __builtin_lsx_vslei_h (_1, 1); ++} ++v4i32 ++__lsx_vslei_w (v4i32 _1) ++{ ++ return __builtin_lsx_vslei_w (_1, 1); ++} ++v2i64 ++__lsx_vslei_d (v2i64 _1) ++{ ++ return __builtin_lsx_vslei_d (_1, 1); ++} ++v16i8 ++__lsx_vsle_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vsle_bu (_1, _2); ++} ++v8i16 ++__lsx_vsle_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vsle_hu (_1, _2); ++} ++v4i32 ++__lsx_vsle_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vsle_wu (_1, _2); ++} ++v2i64 ++__lsx_vsle_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vsle_du (_1, _2); ++} ++v16i8 ++__lsx_vslei_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vslei_bu (_1, 1); ++} ++v8i16 ++__lsx_vslei_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vslei_hu (_1, 1); ++} ++v4i32 ++__lsx_vslei_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vslei_wu (_1, 1); ++} ++v2i64 ++__lsx_vslei_du (v2u64 _1) ++{ ++ return __builtin_lsx_vslei_du (_1, 1); ++} ++v16i8 ++__lsx_vsat_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsat_b (_1, 1); ++} ++v8i16 ++__lsx_vsat_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsat_h (_1, 1); ++} ++v4i32 ++__lsx_vsat_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsat_w (_1, 1); ++} ++v2i64 ++__lsx_vsat_d (v2i64 _1) ++{ ++ return __builtin_lsx_vsat_d (_1, 1); ++} ++v16u8 ++__lsx_vsat_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vsat_bu (_1, 1); ++} ++v8u16 ++__lsx_vsat_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vsat_hu (_1, 1); ++} ++v4u32 ++__lsx_vsat_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vsat_wu (_1, 1); ++} ++v2u64 ++__lsx_vsat_du (v2u64 _1) ++{ ++ return __builtin_lsx_vsat_du (_1, 1); ++} ++v16i8 ++__lsx_vadda_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vadda_b (_1, _2); ++} ++v8i16 ++__lsx_vadda_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vadda_h (_1, _2); ++} ++v4i32 ++__lsx_vadda_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vadda_w (_1, _2); ++} ++v2i64 ++__lsx_vadda_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vadda_d (_1, _2); ++} ++v16i8 ++__lsx_vsadd_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsadd_b (_1, _2); ++} ++v8i16 
++__lsx_vsadd_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsadd_h (_1, _2); ++} ++v4i32 ++__lsx_vsadd_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsadd_w (_1, _2); ++} ++v2i64 ++__lsx_vsadd_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsadd_d (_1, _2); ++} ++v16u8 ++__lsx_vsadd_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vsadd_bu (_1, _2); ++} ++v8u16 ++__lsx_vsadd_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vsadd_hu (_1, _2); ++} ++v4u32 ++__lsx_vsadd_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vsadd_wu (_1, _2); ++} ++v2u64 ++__lsx_vsadd_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vsadd_du (_1, _2); ++} ++v16i8 ++__lsx_vavg_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vavg_b (_1, _2); ++} ++v8i16 ++__lsx_vavg_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vavg_h (_1, _2); ++} ++v4i32 ++__lsx_vavg_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vavg_w (_1, _2); ++} ++v2i64 ++__lsx_vavg_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vavg_d (_1, _2); ++} ++v16u8 ++__lsx_vavg_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vavg_bu (_1, _2); ++} ++v8u16 ++__lsx_vavg_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vavg_hu (_1, _2); ++} ++v4u32 ++__lsx_vavg_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vavg_wu (_1, _2); ++} ++v2u64 ++__lsx_vavg_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vavg_du (_1, _2); ++} ++v16i8 ++__lsx_vavgr_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vavgr_b (_1, _2); ++} ++v8i16 ++__lsx_vavgr_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vavgr_h (_1, _2); ++} ++v4i32 ++__lsx_vavgr_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vavgr_w (_1, _2); ++} ++v2i64 ++__lsx_vavgr_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vavgr_d (_1, _2); ++} ++v16u8 ++__lsx_vavgr_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vavgr_bu (_1, _2); ++} ++v8u16 ++__lsx_vavgr_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vavgr_hu (_1, _2); ++} ++v4u32 ++__lsx_vavgr_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vavgr_wu (_1, _2); ++} ++v2u64 ++__lsx_vavgr_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vavgr_du (_1, _2); ++} ++v16i8 ++__lsx_vssub_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssub_b (_1, _2); ++} ++v8i16 ++__lsx_vssub_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssub_h (_1, _2); ++} ++v4i32 ++__lsx_vssub_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssub_w (_1, _2); ++} ++v2i64 ++__lsx_vssub_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssub_d (_1, _2); ++} ++v16u8 ++__lsx_vssub_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vssub_bu (_1, _2); ++} ++v8u16 ++__lsx_vssub_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssub_hu (_1, _2); ++} ++v4u32 ++__lsx_vssub_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssub_wu (_1, _2); ++} ++v2u64 ++__lsx_vssub_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssub_du (_1, _2); ++} ++v16i8 ++__lsx_vabsd_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vabsd_b (_1, _2); ++} ++v8i16 ++__lsx_vabsd_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vabsd_h (_1, _2); ++} ++v4i32 ++__lsx_vabsd_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vabsd_w (_1, _2); ++} ++v2i64 ++__lsx_vabsd_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vabsd_d (_1, _2); ++} ++v16u8 ++__lsx_vabsd_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vabsd_bu (_1, _2); ++} ++v8u16 ++__lsx_vabsd_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vabsd_hu (_1, _2); ++} ++v4u32 ++__lsx_vabsd_wu 
(v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vabsd_wu (_1, _2); ++} ++v2u64 ++__lsx_vabsd_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vabsd_du (_1, _2); ++} ++v16i8 ++__lsx_vmul_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmul_b (_1, _2); ++} ++v8i16 ++__lsx_vmul_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmul_h (_1, _2); ++} ++v4i32 ++__lsx_vmul_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmul_w (_1, _2); ++} ++v2i64 ++__lsx_vmul_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmul_d (_1, _2); ++} ++v16i8 ++__lsx_vmadd_b (v16i8 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmadd_b (_1, _2, _3); ++} ++v8i16 ++__lsx_vmadd_h (v8i16 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmadd_h (_1, _2, _3); ++} ++v4i32 ++__lsx_vmadd_w (v4i32 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmadd_w (_1, _2, _3); ++} ++v2i64 ++__lsx_vmadd_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmadd_d (_1, _2, _3); ++} ++v16i8 ++__lsx_vmsub_b (v16i8 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmsub_b (_1, _2, _3); ++} ++v8i16 ++__lsx_vmsub_h (v8i16 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmsub_h (_1, _2, _3); ++} ++v4i32 ++__lsx_vmsub_w (v4i32 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmsub_w (_1, _2, _3); ++} ++v2i64 ++__lsx_vmsub_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmsub_d (_1, _2, _3); ++} ++v16i8 ++__lsx_vdiv_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vdiv_b (_1, _2); ++} ++v8i16 ++__lsx_vdiv_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vdiv_h (_1, _2); ++} ++v4i32 ++__lsx_vdiv_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vdiv_w (_1, _2); ++} ++v2i64 ++__lsx_vdiv_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vdiv_d (_1, _2); ++} ++v16u8 ++__lsx_vdiv_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vdiv_bu (_1, _2); ++} ++v8u16 ++__lsx_vdiv_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vdiv_hu (_1, _2); ++} ++v4u32 ++__lsx_vdiv_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vdiv_wu (_1, _2); ++} ++v2u64 ++__lsx_vdiv_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vdiv_du (_1, _2); ++} ++v8i16 ++__lsx_vhaddw_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vhaddw_h_b (_1, _2); ++} ++v4i32 ++__lsx_vhaddw_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vhaddw_w_h (_1, _2); ++} ++v2i64 ++__lsx_vhaddw_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vhaddw_d_w (_1, _2); ++} ++v8u16 ++__lsx_vhaddw_hu_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vhaddw_hu_bu (_1, _2); ++} ++v4u32 ++__lsx_vhaddw_wu_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vhaddw_wu_hu (_1, _2); ++} ++v2u64 ++__lsx_vhaddw_du_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vhaddw_du_wu (_1, _2); ++} ++v8i16 ++__lsx_vhsubw_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vhsubw_h_b (_1, _2); ++} ++v4i32 ++__lsx_vhsubw_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vhsubw_w_h (_1, _2); ++} ++v2i64 ++__lsx_vhsubw_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vhsubw_d_w (_1, _2); ++} ++v8i16 ++__lsx_vhsubw_hu_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vhsubw_hu_bu (_1, _2); ++} ++v4i32 ++__lsx_vhsubw_wu_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vhsubw_wu_hu (_1, _2); ++} ++v2i64 ++__lsx_vhsubw_du_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vhsubw_du_wu (_1, _2); ++} ++v16i8 ++__lsx_vmod_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmod_b (_1, _2); ++} ++v8i16 ++__lsx_vmod_h (v8i16 _1, v8i16 _2) ++{ ++ return 
__builtin_lsx_vmod_h (_1, _2); ++} ++v4i32 ++__lsx_vmod_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmod_w (_1, _2); ++} ++v2i64 ++__lsx_vmod_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmod_d (_1, _2); ++} ++v16u8 ++__lsx_vmod_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmod_bu (_1, _2); ++} ++v8u16 ++__lsx_vmod_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmod_hu (_1, _2); ++} ++v4u32 ++__lsx_vmod_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmod_wu (_1, _2); ++} ++v2u64 ++__lsx_vmod_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmod_du (_1, _2); ++} ++v16i8 ++__lsx_vreplve_b (v16i8 _1, int _2) ++{ ++ return __builtin_lsx_vreplve_b (_1, _2); ++} ++v8i16 ++__lsx_vreplve_h (v8i16 _1, int _2) ++{ ++ return __builtin_lsx_vreplve_h (_1, _2); ++} ++v4i32 ++__lsx_vreplve_w (v4i32 _1, int _2) ++{ ++ return __builtin_lsx_vreplve_w (_1, _2); ++} ++v2i64 ++__lsx_vreplve_d (v2i64 _1, int _2) ++{ ++ return __builtin_lsx_vreplve_d (_1, _2); ++} ++v16i8 ++__lsx_vreplvei_b (v16i8 _1) ++{ ++ return __builtin_lsx_vreplvei_b (_1, 1); ++} ++v8i16 ++__lsx_vreplvei_h (v8i16 _1) ++{ ++ return __builtin_lsx_vreplvei_h (_1, 1); ++} ++v4i32 ++__lsx_vreplvei_w (v4i32 _1) ++{ ++ return __builtin_lsx_vreplvei_w (_1, 1); ++} ++v2i64 ++__lsx_vreplvei_d (v2i64 _1) ++{ ++ return __builtin_lsx_vreplvei_d (_1, 1); ++} ++v16i8 ++__lsx_vpickev_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vpickev_b (_1, _2); ++} ++v8i16 ++__lsx_vpickev_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vpickev_h (_1, _2); ++} ++v4i32 ++__lsx_vpickev_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpickev_w (_1, _2); ++} ++v2i64 ++__lsx_vpickev_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vpickev_d (_1, _2); ++} ++v16i8 ++__lsx_vpickod_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vpickod_b (_1, _2); ++} ++v8i16 ++__lsx_vpickod_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vpickod_h (_1, _2); ++} ++v4i32 ++__lsx_vpickod_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpickod_w (_1, _2); ++} ++v2i64 ++__lsx_vpickod_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vpickod_d (_1, _2); ++} ++v16i8 ++__lsx_vilvh_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vilvh_b (_1, _2); ++} ++v8i16 ++__lsx_vilvh_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vilvh_h (_1, _2); ++} ++v4i32 ++__lsx_vilvh_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vilvh_w (_1, _2); ++} ++v2i64 ++__lsx_vilvh_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vilvh_d (_1, _2); ++} ++v16i8 ++__lsx_vilvl_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vilvl_b (_1, _2); ++} ++v8i16 ++__lsx_vilvl_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vilvl_h (_1, _2); ++} ++v4i32 ++__lsx_vilvl_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vilvl_w (_1, _2); ++} ++v2i64 ++__lsx_vilvl_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vilvl_d (_1, _2); ++} ++v16i8 ++__lsx_vpackev_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vpackev_b (_1, _2); ++} ++v8i16 ++__lsx_vpackev_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vpackev_h (_1, _2); ++} ++v4i32 ++__lsx_vpackev_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpackev_w (_1, _2); ++} ++v2i64 ++__lsx_vpackev_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vpackev_d (_1, _2); ++} ++v16i8 ++__lsx_vpackod_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vpackod_b (_1, _2); ++} ++v8i16 ++__lsx_vpackod_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vpackod_h (_1, _2); ++} ++v4i32 ++__lsx_vpackod_w (v4i32 _1, v4i32 _2) ++{ ++ 
return __builtin_lsx_vpackod_w (_1, _2); ++} ++v2i64 ++__lsx_vpackod_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vpackod_d (_1, _2); ++} ++v8i16 ++__lsx_vshuf_h (v8i16 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vshuf_h (_1, _2, _3); ++} ++v4i32 ++__lsx_vshuf_w (v4i32 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vshuf_w (_1, _2, _3); ++} ++v2i64 ++__lsx_vshuf_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vshuf_d (_1, _2, _3); ++} ++v16u8 ++__lsx_vand_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vand_v (_1, _2); ++} ++v16u8 ++__lsx_vandi_b (v16u8 _1) ++{ ++ return __builtin_lsx_vandi_b (_1, 1); ++} ++v16u8 ++__lsx_vor_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vor_v (_1, _2); ++} ++v16u8 ++__lsx_vori_b (v16u8 _1) ++{ ++ return __builtin_lsx_vori_b (_1, 1); ++} ++v16u8 ++__lsx_vnor_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vnor_v (_1, _2); ++} ++v16u8 ++__lsx_vnori_b (v16u8 _1) ++{ ++ return __builtin_lsx_vnori_b (_1, 1); ++} ++v16u8 ++__lsx_vxor_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vxor_v (_1, _2); ++} ++v16u8 ++__lsx_vxori_b (v16u8 _1) ++{ ++ return __builtin_lsx_vxori_b (_1, 1); ++} ++v16u8 ++__lsx_vbitsel_v (v16u8 _1, v16u8 _2, v16u8 _3) ++{ ++ return __builtin_lsx_vbitsel_v (_1, _2, _3); ++} ++v16u8 ++__lsx_vbitseli_b (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vbitseli_b (_1, _2, 1); ++} ++v16i8 ++__lsx_vshuf4i_b (v16i8 _1) ++{ ++ return __builtin_lsx_vshuf4i_b (_1, 1); ++} ++v8i16 ++__lsx_vshuf4i_h (v8i16 _1) ++{ ++ return __builtin_lsx_vshuf4i_h (_1, 1); ++} ++v4i32 ++__lsx_vshuf4i_w (v4i32 _1) ++{ ++ return __builtin_lsx_vshuf4i_w (_1, 1); ++} ++v16i8 ++__lsx_vreplgr2vr_b (int _1) ++{ ++ return __builtin_lsx_vreplgr2vr_b (_1); ++} ++v8i16 ++__lsx_vreplgr2vr_h (int _1) ++{ ++ return __builtin_lsx_vreplgr2vr_h (_1); ++} ++v4i32 ++__lsx_vreplgr2vr_w (int _1) ++{ ++ return __builtin_lsx_vreplgr2vr_w (_1); ++} ++v2i64 ++__lsx_vreplgr2vr_d (long _1) ++{ ++ return __builtin_lsx_vreplgr2vr_d (_1); ++} ++v16i8 ++__lsx_vpcnt_b (v16i8 _1) ++{ ++ return __builtin_lsx_vpcnt_b (_1); ++} ++v8i16 ++__lsx_vpcnt_h (v8i16 _1) ++{ ++ return __builtin_lsx_vpcnt_h (_1); ++} ++v4i32 ++__lsx_vpcnt_w (v4i32 _1) ++{ ++ return __builtin_lsx_vpcnt_w (_1); ++} ++v2i64 ++__lsx_vpcnt_d (v2i64 _1) ++{ ++ return __builtin_lsx_vpcnt_d (_1); ++} ++v16i8 ++__lsx_vclo_b (v16i8 _1) ++{ ++ return __builtin_lsx_vclo_b (_1); ++} ++v8i16 ++__lsx_vclo_h (v8i16 _1) ++{ ++ return __builtin_lsx_vclo_h (_1); ++} ++v4i32 ++__lsx_vclo_w (v4i32 _1) ++{ ++ return __builtin_lsx_vclo_w (_1); ++} ++v2i64 ++__lsx_vclo_d (v2i64 _1) ++{ ++ return __builtin_lsx_vclo_d (_1); ++} ++v16i8 ++__lsx_vclz_b (v16i8 _1) ++{ ++ return __builtin_lsx_vclz_b (_1); ++} ++v8i16 ++__lsx_vclz_h (v8i16 _1) ++{ ++ return __builtin_lsx_vclz_h (_1); ++} ++v4i32 ++__lsx_vclz_w (v4i32 _1) ++{ ++ return __builtin_lsx_vclz_w (_1); ++} ++v2i64 ++__lsx_vclz_d (v2i64 _1) ++{ ++ return __builtin_lsx_vclz_d (_1); ++} ++int ++__lsx_vpickve2gr_b (v16i8 _1) ++{ ++ return __builtin_lsx_vpickve2gr_b (_1, 1); ++} ++int ++__lsx_vpickve2gr_h (v8i16 _1) ++{ ++ return __builtin_lsx_vpickve2gr_h (_1, 1); ++} ++int ++__lsx_vpickve2gr_w (v4i32 _1) ++{ ++ return __builtin_lsx_vpickve2gr_w (_1, 1); ++} ++long ++__lsx_vpickve2gr_d (v2i64 _1) ++{ ++ return __builtin_lsx_vpickve2gr_d (_1, 1); ++} ++unsigned int ++__lsx_vpickve2gr_bu (v16i8 _1) ++{ ++ return __builtin_lsx_vpickve2gr_bu (_1, 1); ++} ++unsigned int ++__lsx_vpickve2gr_hu (v8i16 _1) ++{ ++ return __builtin_lsx_vpickve2gr_hu (_1, 1); ++} ++unsigned 
int ++__lsx_vpickve2gr_wu (v4i32 _1) ++{ ++ return __builtin_lsx_vpickve2gr_wu (_1, 1); ++} ++unsigned long int ++__lsx_vpickve2gr_du (v2i64 _1) ++{ ++ return __builtin_lsx_vpickve2gr_du (_1, 1); ++} ++v16i8 ++__lsx_vinsgr2vr_b (v16i8 _1) ++{ ++ return __builtin_lsx_vinsgr2vr_b (_1, 1, 1); ++} ++v8i16 ++__lsx_vinsgr2vr_h (v8i16 _1) ++{ ++ return __builtin_lsx_vinsgr2vr_h (_1, 1, 1); ++} ++v4i32 ++__lsx_vinsgr2vr_w (v4i32 _1) ++{ ++ return __builtin_lsx_vinsgr2vr_w (_1, 1, 1); ++} ++v2i64 ++__lsx_vinsgr2vr_d (v2i64 _1) ++{ ++ return __builtin_lsx_vinsgr2vr_d (_1, 1, 1); ++} ++v4f32 ++__lsx_vfadd_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfadd_s (_1, _2); ++} ++v2f64 ++__lsx_vfadd_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfadd_d (_1, _2); ++} ++v4f32 ++__lsx_vfsub_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfsub_s (_1, _2); ++} ++v2f64 ++__lsx_vfsub_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfsub_d (_1, _2); ++} ++v4f32 ++__lsx_vfmul_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmul_s (_1, _2); ++} ++v2f64 ++__lsx_vfmul_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmul_d (_1, _2); ++} ++v4f32 ++__lsx_vfdiv_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfdiv_s (_1, _2); ++} ++v2f64 ++__lsx_vfdiv_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfdiv_d (_1, _2); ++} ++v8i16 ++__lsx_vfcvt_h_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcvt_h_s (_1, _2); ++} ++v4f32 ++__lsx_vfcvt_s_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcvt_s_d (_1, _2); ++} ++v4f32 ++__lsx_vfmin_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmin_s (_1, _2); ++} ++v2f64 ++__lsx_vfmin_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmin_d (_1, _2); ++} ++v4f32 ++__lsx_vfmina_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmina_s (_1, _2); ++} ++v2f64 ++__lsx_vfmina_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmina_d (_1, _2); ++} ++v4f32 ++__lsx_vfmax_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmax_s (_1, _2); ++} ++v2f64 ++__lsx_vfmax_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmax_d (_1, _2); ++} ++v4f32 ++__lsx_vfmaxa_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfmaxa_s (_1, _2); ++} ++v2f64 ++__lsx_vfmaxa_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfmaxa_d (_1, _2); ++} ++v4i32 ++__lsx_vfclass_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfclass_s (_1); ++} ++v2i64 ++__lsx_vfclass_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfclass_d (_1); ++} ++v4f32 ++__lsx_vfsqrt_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfsqrt_s (_1); ++} ++v2f64 ++__lsx_vfsqrt_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfsqrt_d (_1); ++} ++v4f32 ++__lsx_vfrecip_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrecip_s (_1); ++} ++v2f64 ++__lsx_vfrecip_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrecip_d (_1); ++} ++v4f32 ++__lsx_vfrint_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrint_s (_1); ++} ++v2f64 ++__lsx_vfrint_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrint_d (_1); ++} ++v4f32 ++__lsx_vfrsqrt_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrsqrt_s (_1); ++} ++v2f64 ++__lsx_vfrsqrt_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrsqrt_d (_1); ++} ++v4f32 ++__lsx_vflogb_s (v4f32 _1) ++{ ++ return __builtin_lsx_vflogb_s (_1); ++} ++v2f64 ++__lsx_vflogb_d (v2f64 _1) ++{ ++ return __builtin_lsx_vflogb_d (_1); ++} ++v4f32 ++__lsx_vfcvth_s_h (v8i16 _1) ++{ ++ return __builtin_lsx_vfcvth_s_h (_1); ++} ++v2f64 ++__lsx_vfcvth_d_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfcvth_d_s (_1); ++} ++v4f32 ++__lsx_vfcvtl_s_h (v8i16 _1) ++{ ++ return 
__builtin_lsx_vfcvtl_s_h (_1); ++} ++v2f64 ++__lsx_vfcvtl_d_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfcvtl_d_s (_1); ++} ++v4i32 ++__lsx_vftint_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftint_w_s (_1); ++} ++v2i64 ++__lsx_vftint_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftint_l_d (_1); ++} ++v4u32 ++__lsx_vftint_wu_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftint_wu_s (_1); ++} ++v2u64 ++__lsx_vftint_lu_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftint_lu_d (_1); ++} ++v4i32 ++__lsx_vftintrz_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrz_w_s (_1); ++} ++v2i64 ++__lsx_vftintrz_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrz_l_d (_1); ++} ++v4u32 ++__lsx_vftintrz_wu_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrz_wu_s (_1); ++} ++v2u64 ++__lsx_vftintrz_lu_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrz_lu_d (_1); ++} ++v4f32 ++__lsx_vffint_s_w (v4i32 _1) ++{ ++ return __builtin_lsx_vffint_s_w (_1); ++} ++v2f64 ++__lsx_vffint_d_l (v2i64 _1) ++{ ++ return __builtin_lsx_vffint_d_l (_1); ++} ++v4f32 ++__lsx_vffint_s_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vffint_s_wu (_1); ++} ++v2f64 ++__lsx_vffint_d_lu (v2u64 _1) ++{ ++ return __builtin_lsx_vffint_d_lu (_1); ++} ++v16u8 ++__lsx_vandn_v (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vandn_v (_1, _2); ++} ++v16i8 ++__lsx_vneg_b (v16i8 _1) ++{ ++ return __builtin_lsx_vneg_b (_1); ++} ++v8i16 ++__lsx_vneg_h (v8i16 _1) ++{ ++ return __builtin_lsx_vneg_h (_1); ++} ++v4i32 ++__lsx_vneg_w (v4i32 _1) ++{ ++ return __builtin_lsx_vneg_w (_1); ++} ++v2i64 ++__lsx_vneg_d (v2i64 _1) ++{ ++ return __builtin_lsx_vneg_d (_1); ++} ++v16i8 ++__lsx_vmuh_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmuh_b (_1, _2); ++} ++v8i16 ++__lsx_vmuh_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmuh_h (_1, _2); ++} ++v4i32 ++__lsx_vmuh_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmuh_w (_1, _2); ++} ++v2i64 ++__lsx_vmuh_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmuh_d (_1, _2); ++} ++v16u8 ++__lsx_vmuh_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmuh_bu (_1, _2); ++} ++v8u16 ++__lsx_vmuh_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmuh_hu (_1, _2); ++} ++v4u32 ++__lsx_vmuh_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmuh_wu (_1, _2); ++} ++v2u64 ++__lsx_vmuh_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmuh_du (_1, _2); ++} ++v8i16 ++__lsx_vsllwil_h_b (v16i8 _1) ++{ ++ return __builtin_lsx_vsllwil_h_b (_1, 1); ++} ++v4i32 ++__lsx_vsllwil_w_h (v8i16 _1) ++{ ++ return __builtin_lsx_vsllwil_w_h (_1, 1); ++} ++v2i64 ++__lsx_vsllwil_d_w (v4i32 _1) ++{ ++ return __builtin_lsx_vsllwil_d_w (_1, 1); ++} ++v8u16 ++__lsx_vsllwil_hu_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vsllwil_hu_bu (_1, 1); ++} ++v4u32 ++__lsx_vsllwil_wu_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vsllwil_wu_hu (_1, 1); ++} ++v2u64 ++__lsx_vsllwil_du_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vsllwil_du_wu (_1, 1); ++} ++v16i8 ++__lsx_vsran_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsran_b_h (_1, _2); ++} ++v8i16 ++__lsx_vsran_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsran_h_w (_1, _2); ++} ++v4i32 ++__lsx_vsran_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsran_w_d (_1, _2); ++} ++v16i8 ++__lsx_vssran_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssran_b_h (_1, _2); ++} ++v8i16 ++__lsx_vssran_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssran_h_w (_1, _2); ++} ++v4i32 ++__lsx_vssran_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssran_w_d (_1, _2); ++} ++v16u8 ++__lsx_vssran_bu_h 
(v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssran_bu_h (_1, _2); ++} ++v8u16 ++__lsx_vssran_hu_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssran_hu_w (_1, _2); ++} ++v4u32 ++__lsx_vssran_wu_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssran_wu_d (_1, _2); ++} ++v16i8 ++__lsx_vsrarn_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrarn_b_h (_1, _2); ++} ++v8i16 ++__lsx_vsrarn_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrarn_h_w (_1, _2); ++} ++v4i32 ++__lsx_vsrarn_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrarn_w_d (_1, _2); ++} ++v16i8 ++__lsx_vssrarn_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrarn_b_h (_1, _2); ++} ++v8i16 ++__lsx_vssrarn_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrarn_h_w (_1, _2); ++} ++v4i32 ++__lsx_vssrarn_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrarn_w_d (_1, _2); ++} ++v16u8 ++__lsx_vssrarn_bu_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssrarn_bu_h (_1, _2); ++} ++v8u16 ++__lsx_vssrarn_hu_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssrarn_hu_w (_1, _2); ++} ++v4u32 ++__lsx_vssrarn_wu_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssrarn_wu_d (_1, _2); ++} ++v16i8 ++__lsx_vsrln_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrln_b_h (_1, _2); ++} ++v8i16 ++__lsx_vsrln_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrln_h_w (_1, _2); ++} ++v4i32 ++__lsx_vsrln_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrln_w_d (_1, _2); ++} ++v16u8 ++__lsx_vssrln_bu_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssrln_bu_h (_1, _2); ++} ++v8u16 ++__lsx_vssrln_hu_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssrln_hu_w (_1, _2); ++} ++v4u32 ++__lsx_vssrln_wu_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssrln_wu_d (_1, _2); ++} ++v16i8 ++__lsx_vsrlrn_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrlrn_b_h (_1, _2); ++} ++v8i16 ++__lsx_vsrlrn_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrlrn_h_w (_1, _2); ++} ++v4i32 ++__lsx_vsrlrn_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrlrn_w_d (_1, _2); ++} ++v16u8 ++__lsx_vssrlrn_bu_h (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vssrlrn_bu_h (_1, _2); ++} ++v8u16 ++__lsx_vssrlrn_hu_w (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vssrlrn_hu_w (_1, _2); ++} ++v4u32 ++__lsx_vssrlrn_wu_d (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vssrlrn_wu_d (_1, _2); ++} ++v16i8 ++__lsx_vfrstpi_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vfrstpi_b (_1, _2, 1); ++} ++v8i16 ++__lsx_vfrstpi_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vfrstpi_h (_1, _2, 1); ++} ++v16i8 ++__lsx_vfrstp_b (v16i8 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vfrstp_b (_1, _2, _3); ++} ++v8i16 ++__lsx_vfrstp_h (v8i16 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vfrstp_h (_1, _2, _3); ++} ++v2i64 ++__lsx_vshuf4i_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vshuf4i_d (_1, _2, 1); ++} ++v16i8 ++__lsx_vbsrl_v (v16i8 _1) ++{ ++ return __builtin_lsx_vbsrl_v (_1, 1); ++} ++v16i8 ++__lsx_vbsll_v (v16i8 _1) ++{ ++ return __builtin_lsx_vbsll_v (_1, 1); ++} ++v16i8 ++__lsx_vextrins_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vextrins_b (_1, _2, 1); ++} ++v8i16 ++__lsx_vextrins_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vextrins_h (_1, _2, 1); ++} ++v4i32 ++__lsx_vextrins_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vextrins_w (_1, _2, 1); ++} ++v2i64 ++__lsx_vextrins_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vextrins_d (_1, _2, 1); ++} ++v16i8 
++__lsx_vmskltz_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmskltz_b (_1); ++} ++v8i16 ++__lsx_vmskltz_h (v8i16 _1) ++{ ++ return __builtin_lsx_vmskltz_h (_1); ++} ++v4i32 ++__lsx_vmskltz_w (v4i32 _1) ++{ ++ return __builtin_lsx_vmskltz_w (_1); ++} ++v2i64 ++__lsx_vmskltz_d (v2i64 _1) ++{ ++ return __builtin_lsx_vmskltz_d (_1); ++} ++v16i8 ++__lsx_vsigncov_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsigncov_b (_1, _2); ++} ++v8i16 ++__lsx_vsigncov_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsigncov_h (_1, _2); ++} ++v4i32 ++__lsx_vsigncov_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsigncov_w (_1, _2); ++} ++v2i64 ++__lsx_vsigncov_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsigncov_d (_1, _2); ++} ++v4f32 ++__lsx_vfmadd_s (v4f32 _1, v4f32 _2, v4f32 _3) ++{ ++ return __builtin_lsx_vfmadd_s (_1, _2, _3); ++} ++v2f64 ++__lsx_vfmadd_d (v2f64 _1, v2f64 _2, v2f64 _3) ++{ ++ return __builtin_lsx_vfmadd_d (_1, _2, _3); ++} ++v4f32 ++__lsx_vfmsub_s (v4f32 _1, v4f32 _2, v4f32 _3) ++{ ++ return __builtin_lsx_vfmsub_s (_1, _2, _3); ++} ++v2f64 ++__lsx_vfmsub_d (v2f64 _1, v2f64 _2, v2f64 _3) ++{ ++ return __builtin_lsx_vfmsub_d (_1, _2, _3); ++} ++v4f32 ++__lsx_vfnmadd_s (v4f32 _1, v4f32 _2, v4f32 _3) ++{ ++ return __builtin_lsx_vfnmadd_s (_1, _2, _3); ++} ++v2f64 ++__lsx_vfnmadd_d (v2f64 _1, v2f64 _2, v2f64 _3) ++{ ++ return __builtin_lsx_vfnmadd_d (_1, _2, _3); ++} ++v4f32 ++__lsx_vfnmsub_s (v4f32 _1, v4f32 _2, v4f32 _3) ++{ ++ return __builtin_lsx_vfnmsub_s (_1, _2, _3); ++} ++v2f64 ++__lsx_vfnmsub_d (v2f64 _1, v2f64 _2, v2f64 _3) ++{ ++ return __builtin_lsx_vfnmsub_d (_1, _2, _3); ++} ++v4i32 ++__lsx_vftintrne_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrne_w_s (_1); ++} ++v2i64 ++__lsx_vftintrne_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrne_l_d (_1); ++} ++v4i32 ++__lsx_vftintrp_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrp_w_s (_1); ++} ++v2i64 ++__lsx_vftintrp_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrp_l_d (_1); ++} ++v4i32 ++__lsx_vftintrm_w_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrm_w_s (_1); ++} ++v2i64 ++__lsx_vftintrm_l_d (v2f64 _1) ++{ ++ return __builtin_lsx_vftintrm_l_d (_1); ++} ++v4i32 ++__lsx_vftint_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftint_w_d (_1, _2); ++} ++v4f32 ++__lsx_vffint_s_l (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vffint_s_l (_1, _2); ++} ++v4i32 ++__lsx_vftintrz_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftintrz_w_d (_1, _2); ++} ++v4i32 ++__lsx_vftintrp_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftintrp_w_d (_1, _2); ++} ++v4i32 ++__lsx_vftintrm_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftintrm_w_d (_1, _2); ++} ++v4i32 ++__lsx_vftintrne_w_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vftintrne_w_d (_1, _2); ++} ++v2i64 ++__lsx_vftintl_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintl_l_s (_1); ++} ++v2i64 ++__lsx_vftinth_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftinth_l_s (_1); ++} ++v2f64 ++__lsx_vffinth_d_w (v4i32 _1) ++{ ++ return __builtin_lsx_vffinth_d_w (_1); ++} ++v2f64 ++__lsx_vffintl_d_w (v4i32 _1) ++{ ++ return __builtin_lsx_vffintl_d_w (_1); ++} ++v2i64 ++__lsx_vftintrzl_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrzl_l_s (_1); ++} ++v2i64 ++__lsx_vftintrzh_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrzh_l_s (_1); ++} ++v2i64 ++__lsx_vftintrpl_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrpl_l_s (_1); ++} ++v2i64 ++__lsx_vftintrph_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrph_l_s (_1); ++} ++v2i64 
++__lsx_vftintrml_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrml_l_s (_1); ++} ++v2i64 ++__lsx_vftintrmh_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrmh_l_s (_1); ++} ++v2i64 ++__lsx_vftintrnel_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrnel_l_s (_1); ++} ++v2i64 ++__lsx_vftintrneh_l_s (v4f32 _1) ++{ ++ return __builtin_lsx_vftintrneh_l_s (_1); ++} ++v4f32 ++__lsx_vfrintrne_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrintrne_s (_1); ++} ++v2f64 ++__lsx_vfrintrne_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrintrne_d (_1); ++} ++v4f32 ++__lsx_vfrintrz_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrintrz_s (_1); ++} ++v2f64 ++__lsx_vfrintrz_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrintrz_d (_1); ++} ++v4f32 ++__lsx_vfrintrp_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrintrp_s (_1); ++} ++v2f64 ++__lsx_vfrintrp_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrintrp_d (_1); ++} ++v4f32 ++__lsx_vfrintrm_s (v4f32 _1) ++{ ++ return __builtin_lsx_vfrintrm_s (_1); ++} ++v2f64 ++__lsx_vfrintrm_d (v2f64 _1) ++{ ++ return __builtin_lsx_vfrintrm_d (_1); ++} ++void ++__lsx_vstelm_b (v16i8 _1, void *_2) ++{ ++ return __builtin_lsx_vstelm_b (_1, _2, 1, 1); ++} ++void ++__lsx_vstelm_h (v8i16 _1, void *_2) ++{ ++ return __builtin_lsx_vstelm_h (_1, _2, 2, 1); ++} ++void ++__lsx_vstelm_w (v4i32 _1, void *_2) ++{ ++ return __builtin_lsx_vstelm_w (_1, _2, 4, 1); ++} ++void ++__lsx_vstelm_d (v2i64 _1, void *_2) ++{ ++ return __builtin_lsx_vstelm_d (_1, _2, 8, 1); ++} ++v2i64 ++__lsx_vaddwev_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vaddwev_d_w (_1, _2); ++} ++v4i32 ++__lsx_vaddwev_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vaddwev_w_h (_1, _2); ++} ++v8i16 ++__lsx_vaddwev_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vaddwev_h_b (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vaddwod_d_w (_1, _2); ++} ++v4i32 ++__lsx_vaddwod_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vaddwod_w_h (_1, _2); ++} ++v8i16 ++__lsx_vaddwod_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vaddwod_h_b (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vaddwev_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vaddwev_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vaddwev_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vaddwev_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vaddwev_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vaddwod_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vaddwod_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vaddwod_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vaddwod_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vaddwod_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_d_wu_w (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vaddwev_d_wu_w (_1, _2); ++} ++v4i32 ++__lsx_vaddwev_w_hu_h (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vaddwev_w_hu_h (_1, _2); ++} ++v8i16 ++__lsx_vaddwev_h_bu_b (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vaddwev_h_bu_b (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_d_wu_w (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vaddwod_d_wu_w (_1, _2); ++} ++v4i32 ++__lsx_vaddwod_w_hu_h (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vaddwod_w_hu_h (_1, _2); ++} ++v8i16 ++__lsx_vaddwod_h_bu_b (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vaddwod_h_bu_b (_1, _2); ++} ++v2i64 ++__lsx_vsubwev_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsubwev_d_w (_1, _2); ++} ++v4i32 ++__lsx_vsubwev_w_h (v8i16 _1, v8i16 _2) ++{ ++ return 
__builtin_lsx_vsubwev_w_h (_1, _2); ++} ++v8i16 ++__lsx_vsubwev_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsubwev_h_b (_1, _2); ++} ++v2i64 ++__lsx_vsubwod_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsubwod_d_w (_1, _2); ++} ++v4i32 ++__lsx_vsubwod_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsubwod_w_h (_1, _2); ++} ++v8i16 ++__lsx_vsubwod_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsubwod_h_b (_1, _2); ++} ++v2i64 ++__lsx_vsubwev_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vsubwev_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vsubwev_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vsubwev_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vsubwev_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vsubwev_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vsubwod_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vsubwod_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vsubwod_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vsubwod_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vsubwod_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vsubwod_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vaddwev_q_d (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vaddwod_q_d (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vaddwev_q_du (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vaddwod_q_du (_1, _2); ++} ++v2i64 ++__lsx_vsubwev_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsubwev_q_d (_1, _2); ++} ++v2i64 ++__lsx_vsubwod_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsubwod_q_d (_1, _2); ++} ++v2i64 ++__lsx_vsubwev_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vsubwev_q_du (_1, _2); ++} ++v2i64 ++__lsx_vsubwod_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vsubwod_q_du (_1, _2); ++} ++v2i64 ++__lsx_vaddwev_q_du_d (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vaddwev_q_du_d (_1, _2); ++} ++v2i64 ++__lsx_vaddwod_q_du_d (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vaddwod_q_du_d (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmulwev_d_w (_1, _2); ++} ++v4i32 ++__lsx_vmulwev_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmulwev_w_h (_1, _2); ++} ++v8i16 ++__lsx_vmulwev_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmulwev_h_b (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_d_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmulwod_d_w (_1, _2); ++} ++v4i32 ++__lsx_vmulwod_w_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmulwod_w_h (_1, _2); ++} ++v8i16 ++__lsx_vmulwod_h_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmulwod_h_b (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmulwev_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vmulwev_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmulwev_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vmulwev_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmulwev_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_d_wu (v4u32 _1, v4u32 _2) ++{ ++ return __builtin_lsx_vmulwod_d_wu (_1, _2); ++} ++v4i32 ++__lsx_vmulwod_w_hu (v8u16 _1, v8u16 _2) ++{ ++ return __builtin_lsx_vmulwod_w_hu (_1, _2); ++} ++v8i16 ++__lsx_vmulwod_h_bu (v16u8 _1, v16u8 _2) ++{ ++ return __builtin_lsx_vmulwod_h_bu (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_d_wu_w (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmulwev_d_wu_w (_1, _2); ++} ++v4i32 ++__lsx_vmulwev_w_hu_h (v8u16 _1, v8i16 _2) ++{ ++ return 
__builtin_lsx_vmulwev_w_hu_h (_1, _2); ++} ++v8i16 ++__lsx_vmulwev_h_bu_b (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmulwev_h_bu_b (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_d_wu_w (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vmulwod_d_wu_w (_1, _2); ++} ++v4i32 ++__lsx_vmulwod_w_hu_h (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vmulwod_w_hu_h (_1, _2); ++} ++v8i16 ++__lsx_vmulwod_h_bu_b (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vmulwod_h_bu_b (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmulwev_q_d (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmulwod_q_d (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmulwev_q_du (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_q_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vmulwod_q_du (_1, _2); ++} ++v2i64 ++__lsx_vmulwev_q_du_d (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmulwev_q_du_d (_1, _2); ++} ++v2i64 ++__lsx_vmulwod_q_du_d (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vmulwod_q_du_d (_1, _2); ++} ++v2i64 ++__lsx_vhaddw_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vhaddw_q_d (_1, _2); ++} ++v2u64 ++__lsx_vhaddw_qu_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vhaddw_qu_du (_1, _2); ++} ++v2i64 ++__lsx_vhsubw_q_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vhsubw_q_d (_1, _2); ++} ++v2u64 ++__lsx_vhsubw_qu_du (v2u64 _1, v2u64 _2) ++{ ++ return __builtin_lsx_vhsubw_qu_du (_1, _2); ++} ++v2i64 ++__lsx_vmaddwev_d_w (v2i64 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmaddwev_d_w (_1, _2, _3); ++} ++v4i32 ++__lsx_vmaddwev_w_h (v4i32 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmaddwev_w_h (_1, _2, _3); ++} ++v8i16 ++__lsx_vmaddwev_h_b (v8i16 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmaddwev_h_b (_1, _2, _3); ++} ++v2u64 ++__lsx_vmaddwev_d_wu (v2u64 _1, v4u32 _2, v4u32 _3) ++{ ++ return __builtin_lsx_vmaddwev_d_wu (_1, _2, _3); ++} ++v4u32 ++__lsx_vmaddwev_w_hu (v4u32 _1, v8u16 _2, v8u16 _3) ++{ ++ return __builtin_lsx_vmaddwev_w_hu (_1, _2, _3); ++} ++v8u16 ++__lsx_vmaddwev_h_bu (v8u16 _1, v16u8 _2, v16u8 _3) ++{ ++ return __builtin_lsx_vmaddwev_h_bu (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwod_d_w (v2i64 _1, v4i32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmaddwod_d_w (_1, _2, _3); ++} ++v4i32 ++__lsx_vmaddwod_w_h (v4i32 _1, v8i16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmaddwod_w_h (_1, _2, _3); ++} ++v8i16 ++__lsx_vmaddwod_h_b (v8i16 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmaddwod_h_b (_1, _2, _3); ++} ++v2u64 ++__lsx_vmaddwod_d_wu (v2u64 _1, v4u32 _2, v4u32 _3) ++{ ++ return __builtin_lsx_vmaddwod_d_wu (_1, _2, _3); ++} ++v4u32 ++__lsx_vmaddwod_w_hu (v4u32 _1, v8u16 _2, v8u16 _3) ++{ ++ return __builtin_lsx_vmaddwod_w_hu (_1, _2, _3); ++} ++v8u16 ++__lsx_vmaddwod_h_bu (v8u16 _1, v16u8 _2, v16u8 _3) ++{ ++ return __builtin_lsx_vmaddwod_h_bu (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwev_d_wu_w (v2i64 _1, v4u32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmaddwev_d_wu_w (_1, _2, _3); ++} ++v4i32 ++__lsx_vmaddwev_w_hu_h (v4i32 _1, v8u16 _2, v8i16 _3) ++{ ++ return __builtin_lsx_vmaddwev_w_hu_h (_1, _2, _3); ++} ++v8i16 ++__lsx_vmaddwev_h_bu_b (v8i16 _1, v16u8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmaddwev_h_bu_b (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwod_d_wu_w (v2i64 _1, v4u32 _2, v4i32 _3) ++{ ++ return __builtin_lsx_vmaddwod_d_wu_w (_1, _2, _3); ++} ++v4i32 ++__lsx_vmaddwod_w_hu_h (v4i32 _1, v8u16 _2, v8i16 _3) ++{ ++ return 
__builtin_lsx_vmaddwod_w_hu_h (_1, _2, _3); ++} ++v8i16 ++__lsx_vmaddwod_h_bu_b (v8i16 _1, v16u8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vmaddwod_h_bu_b (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwev_q_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmaddwev_q_d (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwod_q_d (v2i64 _1, v2i64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmaddwod_q_d (_1, _2, _3); ++} ++v2u64 ++__lsx_vmaddwev_q_du (v2u64 _1, v2u64 _2, v2u64 _3) ++{ ++ return __builtin_lsx_vmaddwev_q_du (_1, _2, _3); ++} ++v2u64 ++__lsx_vmaddwod_q_du (v2u64 _1, v2u64 _2, v2u64 _3) ++{ ++ return __builtin_lsx_vmaddwod_q_du (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwev_q_du_d (v2i64 _1, v2u64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmaddwev_q_du_d (_1, _2, _3); ++} ++v2i64 ++__lsx_vmaddwod_q_du_d (v2i64 _1, v2u64 _2, v2i64 _3) ++{ ++ return __builtin_lsx_vmaddwod_q_du_d (_1, _2, _3); ++} ++v16i8 ++__lsx_vrotr_b (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vrotr_b (_1, _2); ++} ++v8i16 ++__lsx_vrotr_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vrotr_h (_1, _2); ++} ++v4i32 ++__lsx_vrotr_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vrotr_w (_1, _2); ++} ++v2i64 ++__lsx_vrotr_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vrotr_d (_1, _2); ++} ++v2i64 ++__lsx_vadd_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vadd_q (_1, _2); ++} ++v2i64 ++__lsx_vsub_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsub_q (_1, _2); ++} ++v16i8 ++__lsx_vldrepl_b (void *_1) ++{ ++ return __builtin_lsx_vldrepl_b (_1, 1); ++} ++v8i16 ++__lsx_vldrepl_h (void *_1) ++{ ++ return __builtin_lsx_vldrepl_h (_1, 2); ++} ++v4i32 ++__lsx_vldrepl_w (void *_1) ++{ ++ return __builtin_lsx_vldrepl_w (_1, 4); ++} ++v2i64 ++__lsx_vldrepl_d (void *_1) ++{ ++ return __builtin_lsx_vldrepl_d (_1, 8); ++} ++v16i8 ++__lsx_vmskgez_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmskgez_b (_1); ++} ++v16i8 ++__lsx_vmsknz_b (v16i8 _1) ++{ ++ return __builtin_lsx_vmsknz_b (_1); ++} ++v8i16 ++__lsx_vexth_h_b (v16i8 _1) ++{ ++ return __builtin_lsx_vexth_h_b (_1); ++} ++v4i32 ++__lsx_vexth_w_h (v8i16 _1) ++{ ++ return __builtin_lsx_vexth_w_h (_1); ++} ++v2i64 ++__lsx_vexth_d_w (v4i32 _1) ++{ ++ return __builtin_lsx_vexth_d_w (_1); ++} ++v2i64 ++__lsx_vexth_q_d (v2i64 _1) ++{ ++ return __builtin_lsx_vexth_q_d (_1); ++} ++v8u16 ++__lsx_vexth_hu_bu (v16u8 _1) ++{ ++ return __builtin_lsx_vexth_hu_bu (_1); ++} ++v4u32 ++__lsx_vexth_wu_hu (v8u16 _1) ++{ ++ return __builtin_lsx_vexth_wu_hu (_1); ++} ++v2u64 ++__lsx_vexth_du_wu (v4u32 _1) ++{ ++ return __builtin_lsx_vexth_du_wu (_1); ++} ++v2u64 ++__lsx_vexth_qu_du (v2u64 _1) ++{ ++ return __builtin_lsx_vexth_qu_du (_1); ++} ++v16i8 ++__lsx_vrotri_b (v16i8 _1) ++{ ++ return __builtin_lsx_vrotri_b (_1, 1); ++} ++v8i16 ++__lsx_vrotri_h (v8i16 _1) ++{ ++ return __builtin_lsx_vrotri_h (_1, 1); ++} ++v4i32 ++__lsx_vrotri_w (v4i32 _1) ++{ ++ return __builtin_lsx_vrotri_w (_1, 1); ++} ++v2i64 ++__lsx_vrotri_d (v2i64 _1) ++{ ++ return __builtin_lsx_vrotri_d (_1, 1); ++} ++v2i64 ++__lsx_vextl_q_d (v2i64 _1) ++{ ++ return __builtin_lsx_vextl_q_d (_1); ++} ++v16i8 ++__lsx_vsrlni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrlni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vsrlni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrlni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vsrlni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrlni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vsrlni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrlni_d_q (_1, _2, 1); ++} ++v16i8 
++__lsx_vsrlrni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrlrni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vsrlrni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrlrni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vsrlrni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrlrni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vsrlrni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrlrni_d_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrlni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrlni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vssrlni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vssrlni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vssrlni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlni_d_q (_1, _2, 1); ++} ++v16u8 ++__lsx_vssrlni_bu_h (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrlni_bu_h (_1, _2, 1); ++} ++v8u16 ++__lsx_vssrlni_hu_w (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlni_hu_w (_1, _2, 1); ++} ++v4u32 ++__lsx_vssrlni_wu_d (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlni_wu_d (_1, _2, 1); ++} ++v2u64 ++__lsx_vssrlni_du_q (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlni_du_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrlrni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrlrni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vssrlrni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlrni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vssrlrni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlrni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vssrlrni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlrni_d_q (_1, _2, 1); ++} ++v16u8 ++__lsx_vssrlrni_bu_h (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrlrni_bu_h (_1, _2, 1); ++} ++v8u16 ++__lsx_vssrlrni_hu_w (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlrni_hu_w (_1, _2, 1); ++} ++v4u32 ++__lsx_vssrlrni_wu_d (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlrni_wu_d (_1, _2, 1); ++} ++v2u64 ++__lsx_vssrlrni_du_q (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlrni_du_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vsrani_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrani_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vsrani_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrani_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vsrani_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrani_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vsrani_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrani_d_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vsrarni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vsrarni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vsrarni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vsrarni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vsrarni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vsrarni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vsrarni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vsrarni_d_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrani_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrani_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vssrani_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrani_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vssrani_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrani_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vssrani_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrani_d_q (_1, _2, 1); ++} ++v16u8 ++__lsx_vssrani_bu_h (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrani_bu_h (_1, _2, 1); ++} ++v8u16 ++__lsx_vssrani_hu_w (v8u16 
_1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrani_hu_w (_1, _2, 1); ++} ++v4u32 ++__lsx_vssrani_wu_d (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrani_wu_d (_1, _2, 1); ++} ++v2u64 ++__lsx_vssrani_du_q (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrani_du_q (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrarni_b_h (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrarni_b_h (_1, _2, 1); ++} ++v8i16 ++__lsx_vssrarni_h_w (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrarni_h_w (_1, _2, 1); ++} ++v4i32 ++__lsx_vssrarni_w_d (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrarni_w_d (_1, _2, 1); ++} ++v2i64 ++__lsx_vssrarni_d_q (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrarni_d_q (_1, _2, 1); ++} ++v16u8 ++__lsx_vssrarni_bu_h (v16u8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vssrarni_bu_h (_1, _2, 1); ++} ++v8u16 ++__lsx_vssrarni_hu_w (v8u16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrarni_hu_w (_1, _2, 1); ++} ++v4u32 ++__lsx_vssrarni_wu_d (v4u32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrarni_wu_d (_1, _2, 1); ++} ++v2u64 ++__lsx_vssrarni_du_q (v2u64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrarni_du_q (_1, _2, 1); ++} ++v4i32 ++__lsx_vpermi_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vpermi_w (_1, _2, 1); ++} ++v16i8 ++__lsx_vld (void *_1) ++{ ++ return __builtin_lsx_vld (_1, 1); ++} ++void ++__lsx_vst (v16i8 _1, void *_2) ++{ ++ return __builtin_lsx_vst (_1, _2, 1); ++} ++v16i8 ++__lsx_vssrlrn_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrlrn_b_h (_1, _2); ++} ++v8i16 ++__lsx_vssrlrn_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrlrn_h_w (_1, _2); ++} ++v4i32 ++__lsx_vssrlrn_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrlrn_w_d (_1, _2); ++} ++v16i8 ++__lsx_vssrln_b_h (v8i16 _1, v8i16 _2) ++{ ++ return __builtin_lsx_vssrln_b_h (_1, _2); ++} ++v8i16 ++__lsx_vssrln_h_w (v4i32 _1, v4i32 _2) ++{ ++ return __builtin_lsx_vssrln_h_w (_1, _2); ++} ++v4i32 ++__lsx_vssrln_w_d (v2i64 _1, v2i64 _2) ++{ ++ return __builtin_lsx_vssrln_w_d (_1, _2); ++} ++v16i8 ++__lsx_vorn_v (v16i8 _1, v16i8 _2) ++{ ++ return __builtin_lsx_vorn_v (_1, _2); ++} ++v2i64 ++__lsx_vldi () ++{ ++ return __builtin_lsx_vldi (1); ++} ++v16i8 ++__lsx_vshuf_b (v16i8 _1, v16i8 _2, v16i8 _3) ++{ ++ return __builtin_lsx_vshuf_b (_1, _2, _3); ++} ++v16i8 ++__lsx_vldx (void *_1) ++{ ++ return __builtin_lsx_vldx (_1, 1); ++} ++void ++__lsx_vstx (v16i8 _1, void *_2) ++{ ++ return __builtin_lsx_vstx (_1, _2, 1); ++} ++v2u64 ++__lsx_vextl_qu_du (v2u64 _1) ++{ ++ return __builtin_lsx_vextl_qu_du (_1); ++} ++int ++__lsx_bnz_b (v16u8 _1) ++{ ++ return __builtin_lsx_bnz_b (_1); ++} ++int ++__lsx_bnz_d (v2u64 _1) ++{ ++ return __builtin_lsx_bnz_d (_1); ++} ++int ++__lsx_bnz_h (v8u16 _1) ++{ ++ return __builtin_lsx_bnz_h (_1); ++} ++int ++__lsx_bnz_v (v16u8 _1) ++{ ++ return __builtin_lsx_bnz_v (_1); ++} ++int ++__lsx_bnz_w (v4u32 _1) ++{ ++ return __builtin_lsx_bnz_w (_1); ++} ++int ++__lsx_bz_b (v16u8 _1) ++{ ++ return __builtin_lsx_bz_b (_1); ++} ++int ++__lsx_bz_d (v2u64 _1) ++{ ++ return __builtin_lsx_bz_d (_1); ++} ++int ++__lsx_bz_h (v8u16 _1) ++{ ++ return __builtin_lsx_bz_h (_1); ++} ++int ++__lsx_bz_v (v16u8 _1) ++{ ++ return __builtin_lsx_bz_v (_1); ++} ++int ++__lsx_bz_w (v4u32 _1) ++{ ++ return __builtin_lsx_bz_w (_1); ++} ++v2i64 ++__lsx_vfcmp_caf_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_caf_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_caf_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_caf_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_ceq_d (v2f64 _1, v2f64 
_2) ++{ ++ return __builtin_lsx_vfcmp_ceq_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_ceq_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_ceq_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cle_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cle_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cle_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cle_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_clt_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_clt_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_clt_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_clt_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cne_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cne_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cne_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cne_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cor_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cor_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cor_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cor_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cueq_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cueq_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cueq_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cueq_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cule_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cule_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cule_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cule_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cult_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cult_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cult_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cult_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cun_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cun_d (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_cune_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_cune_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cune_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cune_s (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_cun_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_cun_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_saf_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_saf_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_saf_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_saf_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_seq_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_seq_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_seq_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_seq_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sle_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sle_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sle_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sle_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_slt_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_slt_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_slt_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_slt_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sne_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sne_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sne_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sne_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sor_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sor_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sor_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sor_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sueq_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sueq_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sueq_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sueq_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sule_d (v2f64 _1, v2f64 _2) ++{ ++ return 
__builtin_lsx_vfcmp_sule_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sule_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sule_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sult_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sult_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sult_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sult_s (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sun_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sun_d (_1, _2); ++} ++v2i64 ++__lsx_vfcmp_sune_d (v2f64 _1, v2f64 _2) ++{ ++ return __builtin_lsx_vfcmp_sune_d (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sune_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sune_s (_1, _2); ++} ++v4i32 ++__lsx_vfcmp_sun_s (v4f32 _1, v4f32 _2) ++{ ++ return __builtin_lsx_vfcmp_sun_s (_1, _2); ++} ++v16i8 ++__lsx_vrepli_b () ++{ ++ return __builtin_lsx_vrepli_b (1); ++} ++v2i64 ++__lsx_vrepli_d () ++{ ++ return __builtin_lsx_vrepli_d (1); ++} ++v8i16 ++__lsx_vrepli_h () ++{ ++ return __builtin_lsx_vrepli_h (1); ++} ++v4i32 ++__lsx_vrepli_w () ++{ ++ return __builtin_lsx_vrepli_w (1); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-sad.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-sad.c +new file mode 100644 +index 000000000..b92110a8b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-sad.c +@@ -0,0 +1,20 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlsx" } */ ++ ++#define N 1024 ++ ++#define TEST(SIGN) \ ++ SIGN char a_##SIGN[N], b_##SIGN[N]; \ ++ int f_##SIGN (void) \ ++ { \ ++ int i, sum = 0; \ ++ for (i = 0; i < N; i++) \ ++ sum += __builtin_abs (a_##SIGN[i] - b_##SIGN[i]);; \ ++ return sum; \ ++ } ++ ++TEST(signed); ++TEST(unsigned); ++ ++/* { dg-final { scan-assembler {\tvabsd.bu\t} } } */ ++/* { dg-final { scan-assembler {\tvabsd.b\t} } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c +new file mode 100644 +index 000000000..e336581f3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c +@@ -0,0 +1,272 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000700000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0014fff500000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f03000780000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f15000a7f010101; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000127fffffea; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f0101070101010f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000127f010116; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67eb85af0000b000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128i_result[0]) = 0x387c7e0a133f2000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff7fffefffa01ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffbfffefffe01ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0305030203020502; ++ *((unsigned long *)&__m128i_result[0]) = 0x0301030203020502; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4ee376188658d85f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5728dcc85ac760d2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4e1d76187a58285f; ++ *((unsigned long *)&__m128i_result[0]) = 0x572824385a39602e; ++ __m128i_out = __lsx_vabsd_b 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a545374471b7070; ++ *((unsigned long *)&__m128i_result[0]) = 0x274f4f0648145f50; ++ __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21f32eafa486fd38; ++ *((unsigned long *)&__m128i_op0[0]) = 0x407c2ca3d3430357; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x21f32eaf5b7a02c8; ++ *((unsigned long *)&__m128i_result[0]) = 0x407c2ca32cbd0357; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003bfb4000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003bfb4000; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100010001; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffdf; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000021; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000700000004e000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000000012020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0038000000051fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x003c000000022021; ++ __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9d9b9bbfaa20e9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbe081c963e6fee68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363636463abdf17; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e08016161198; ++ __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01fe0400000006; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005fffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe01fc0005fff4; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x010003f00000ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x017f03000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x010003f00000ff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x017f03000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000001fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000001ffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffac0a000000; ++ __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c +new file mode 100644 +index 000000000..c1af80e14 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x5641127843c0d41e; ++ *((unsigned long *)&__m128i_result[0]) = 0xfedb27095b6bff95; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000383ffff1fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ca354688; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000038335ca2777; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fff80000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001fd0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001fd0; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000005; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ff08ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ff08ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefff00000001fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffe1ffc100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe1ffc100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefff00000401fff; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff000000ff000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff000000ff000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff000000ff000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op0[0]) = 0x545cab1d7e57c415; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_result[0]) = 0x545cab1d81a83bea; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcfb799f1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0282800002828282; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5555001400005111; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffabbeab55110140; ++ *((unsigned long *)&__m128i_result[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long *)&__m128i_result[0]) = 0xfd293eab528e7ebe; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128i_op0[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128i_result[0]) = 0xb4b8122ef4054bb3; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xc39fffff007fffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00fd; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8006000080020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8004000080020000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8006000080020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8004000080020000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0015172b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffb00151727; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffbfffffff8; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffbfffffff8; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffdc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffbffffffd8; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffbfffffff8; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffff9; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x64b680a2ae3af8c8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x161c0c363c200824; ++ *((unsigned long *)&__m128i_result[1]) = 0x23b57fa16d39f7c8; ++ *((unsigned long *)&__m128i_result[0]) = 0x161c0c363c200824; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fffff0000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x3fffff0000000000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0bd80bd80bd8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf0bd80bd80bd8000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffef8; ++ *((unsigned long *)&__m128i_result[0]) = 0xffdfffdfffdffee0; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c +new file mode 100644 +index 000000000..7cfb989e4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c +@@ -0,0 +1,416 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ 
unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002010000fc000b; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000017fda829; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001fffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f0000fd7f0000fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x7e7e7e7eff0f7f04; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f0000fd7f01fffb; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6a1a3fbb3c90260e; ++ *((unsigned long *)&__m128i_result[1]) = 0x19df307a5d04acbb; ++ *((unsigned long *)&__m128i_result[0]) = 0x5ed032b06bde1ab6; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5555001400005111; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffabbeab55110140; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5555001400005111; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffabbeab55110140; ++ *((unsigned long *)&__m128i_result[1]) = 0xaaaa00280000a222; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe567c56aa220280; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_result[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_result[0]) = 0x0982e2daf234ed87; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000002a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000049000000c0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffff29; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000bd30; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d7fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007a6d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000dfefe0000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefa000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0038000000051fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003c000000022021; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f370101ff04ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f3bffffa0226021; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1baf8eabd26bc629; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1c2640b9a8e9fb49; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002dab8746acf8e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00036dd1c5c15856; ++ *((unsigned long *)&__m128i_result[1]) = 0x1bb1686346d595b7; ++ *((unsigned long *)&__m128i_result[0]) = 0x1c29ad8a6daa539f; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfeffffffffff0002; ++ __m128i_out = __lsx_vadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000c3080000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff81ffffc3080000; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x004200a000200001; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001f0000001f; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000001f0000001f; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0029aeaca57d74e6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xdbe332365392c686; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000056f64adb9464; ++ *((unsigned long *)&__m128i_op1[0]) = 0x29ca096f235819c2; ++ *((unsigned long *)&__m128i_result[1]) = 0x002a05a2f059094a; ++ *((unsigned long *)&__m128i_result[0]) = 0x05ad3ba576eae048; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000040d; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001000000ff; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000002fffffffb; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000fffb; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001201fe01e9; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001201fe01e9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000c0000001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002403fc03d2; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff1000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff1000100010001; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_result[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_result[0]) = 0xa352bfac9269e0aa; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001001100110068; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001001100110067; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3789f68000000000; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000555889; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002580f01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00060fbf02040fbf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00020fbf02000fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x00060fbf02596848; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020fbf04581ec0; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001388928513889; ++ *((unsigned long *)&__m128i_op0[0]) = 0x006938094a013889; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001388928513889; ++ *((unsigned long *)&__m128i_op1[0]) = 0x006938094a013889; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002711250a27112; ++ *((unsigned long *)&__m128i_result[0]) = 0x00d2701294027112; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_result[1]) = 0x202544f490f2de35; ++ *((unsigned long *)&__m128i_result[0]) = 0x202544f490f2de35; ++ __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c +new file mode 100644 +index 000000000..4bb699eab +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[0]) = 0x52527d7d52527d7d; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffc001f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010202050120; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010102020202; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000700020005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000300030003; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0003000700020005; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f8000004f800000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000300030004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000300030004; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf0fd800080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000a00028004000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6b9fe3649c9d6363; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363bc9e8b696363; ++ __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111113111111131; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111113111111131; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000006a9a5c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000092444; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000006a9a5c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000092444; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000d4ccb8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000124888; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff082f000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000f7d1000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x773324887fffffff; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5a6f5c53ebed3faa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa36aca4435b8b8e1; ++ *((unsigned long *)&__m128i_result[1]) = 0x5a6f61865d36d3aa; ++ *((unsigned long *)&__m128i_result[0]) = 0x7bea6962a0bfb621; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000008140c80; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000fffe0000ff45; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff000000b9; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffd5002affffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x343d8dc6b0ed5a08; ++ *((unsigned long *)&__m128i_result[1]) = 0x012b012c01010246; ++ *((unsigned long *)&__m128i_result[0]) = 0x353e743b50135a4f; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003c853c843c87e; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000200000002001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000001fff0021; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010109; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000004442403e4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000044525043c; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000208000002080; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000003f0000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x003f0000003f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x803e0000803e0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x803e0000803e0000; ++ __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000008000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000008000; ++ __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff9000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc000400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007001400000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004001000000000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefeff00fefeff00; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000020300000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000044470000; ++ __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff01ff01ac025c87; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long *)&__m128i_result[1]) = 0x64616462b76106dc; ++ *((unsigned long *)&__m128i_result[0]) = 0x64616462b71d06c2; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_result[1]) = 0x0051005200510052; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0051005200510052; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4480000044800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x45c0000044800000; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363636463636363; ++ __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c +new file mode 100644 +index 000000000..77afabe92 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c +@@ -0,0 +1,251 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x1414141414141415; ++ *((unsigned long *)&__m128i_result[0]) = 0x1414141414141415; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0505050505050505; ++ *((unsigned long *)&__m128i_result[0]) = 0x0505050504040404; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f27332b9f; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_result[0]) = 0x0303030303030304; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x3); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long *)&__m128i_result[0]) = 0x8f8f8f8f8f8f8f8f; ++ __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_result[0]) = 0x0018001800180018; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0019081900190019; ++ *((unsigned long *)&__m128i_result[0]) = 0x0019081900190019; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc1000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffcc000b000b000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000b010a000b; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001f001f001f001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x001f001f001f001f; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001c001c001c001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x001c001c001c001c; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_result[0]) = 0xc89d7f0fed582019; ++ __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000a0000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000a0000000a; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000090100000a; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe009ffff2008; ++ 
__m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000300000003; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128i_result[1]) = 0xfc01fd13fc02fe0c; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe00fd14fe01fd16; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000c7fff000c; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000500000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000005fffe0006; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fffffeff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000009ffffff08; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000900000009; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8; ++ *((unsigned long *)&__m128i_result[1]) = 0x55aa55c355aa55c4; ++ *((unsigned long *)&__m128i_result[0]) = 0xaa55556f55aaaac1; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000e0000002e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000004e; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f000400000003; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x003f000400000003; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff8000010f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80000a0f800009; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_result[0]) = 0x020310edc003023d; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd7059f7fd70; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_result[0]) = 0x59f7fd8759f7fd87; ++ __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6420e0208400c4c4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x20c4e0c4e0da647a; ++ *((unsigned long *)&__m128i_result[1]) = 0x6420e0208400c4e3; ++ *((unsigned long *)&__m128i_result[0]) = 0x20c4e0c4e0da6499; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x21201f1e1d001b25; ++ *((unsigned long *)&__m128i_result[0]) = 0x191817161514131d; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007770ffff941d; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000080000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c +new file mode 100644 +index 000000000..b7b16a325 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fffffff80000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003ffd000a4000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffd000a0000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000049ffffff4d; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff01ffffffff; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000005e695e95; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5e695e96c396b402; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000005e94; ++ *((unsigned long *)&__m128i_result[0]) = 0x00005e96ffffb402; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffb; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000100000000fc; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned 
long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000005d5d; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffe3636363; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000063692363; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0202020202020203; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0202020202020203; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000002020202; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002020202; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x76f424887fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000017161515; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000095141311; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdfef9ff0efff900; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfd000000fb00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fe00f8000700; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fb01; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000007000000; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000080806362; ++ *((unsigned long *)&__m128i_op1[0]) = 0x807f808000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80806362; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000010002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff960015; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010002; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffff960015; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000047e59090; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffb8145f50; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff008ff820; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff008ff820; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000011ff040; ++ __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000100010001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010001fffd; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe700000007; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000dfa6e0c6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000d46cdc13; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe813f00fe813f00; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c +new file mode 100644 +index 000000000..a407cadfb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x061006100613030c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4d6814ef9c77ce46; ++ *((unsigned long *)&__m128i_result[1]) = 0x010f010f0112010b; ++ *((unsigned long *)&__m128i_result[0]) = 0x016701ee01760145; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ac00000000; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf589caff5605f2fa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000eb00ab; ++ *((unsigned long *)&__m128i_result[0]) = 0x017400ff004500fa; ++ __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000017d7000001e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x000016d10000012b; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff000000ff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001c8520000c97d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001c8520001c87d; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000085af0000b000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00017ea200002000; ++ __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000024; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000024; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00307028003f80b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040007fff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000003f80b0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff80ffffff80ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000018080807f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffff80fe; ++ __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ 
__m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff8000000000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000180100100000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001801b5307f80; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff8007; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffa4fb6021a41f7e; ++ __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c +new file mode 100644 +index 000000000..4d5c60998 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007005200440062; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080005e007f00d8; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012b015700bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001002affca0070; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363771163631745; ++ *((unsigned long *)&__m128i_op1[0]) = 0x636363ec6363636c; ++ *((unsigned long *)&__m128i_result[1]) = 0x006300fb00630143; ++ *((unsigned long *)&__m128i_result[0]) = 0x0063ffec0063006c; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080ffffffff8080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00008080ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ffffffffff80; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff80ffffffff; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00197f26cb658837; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01009aa4a301084b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_result[1]) = 0x0037ffd40083ffe5; ++ *((unsigned long *)&__m128i_result[0]) = 0x001e0052001ffff9; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00ffffff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000090900000998; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000900ffff98; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000; ++ __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1e0200001e020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffcfffffffd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffdfffffffd; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010100000101; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa2f54a1ea2f54a1e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x00004a1e00004a1e; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000868686868686; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000868600008785; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe363636363abdf16; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000cecd00004657; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000c90000011197; ++ __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001000f000e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fff1000ffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000f000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000ffffe; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c07e181ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3430af9effffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00ff; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00060012000e002b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000049ffffffaa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000e002b; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffaa; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000bfffffffe0f6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff7a53; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff7f80ffff7f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff7f80ffff7f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff7f80ffff7f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff7f80ffff7f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffeff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffeff00; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 
0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000003dffc2; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080006b0000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000055555555; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff7f810100001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff007fff810001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000400530050ffa6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffff811001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000a1ff4c; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000008000001e; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9611c3985b3159f5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000035697d4e; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000013ecaadf2; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ef00ff010f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff010f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc1f03e1042208410; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000110; ++ *((unsigned long *)&__m128i_result[0]) = 
0x00000000431f851f; ++ __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffbfffffffbe; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x06b1213ef1efa299; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8312f5424ca4a07f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1f1f1f1f1f1f1f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f1f1f27332b9f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xa23214697fd03f7f; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80000000ffffd860; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000; ++ __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c +new file mode 100644 +index 000000000..0ebe8c8a9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c +@@ -0,0 +1,408 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ca354688; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_result[1]) = 0x00040003ff83ff84; ++ *((unsigned long *)&__m128i_result[0]) = 0x00040003ff4dffca; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001f5400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001f00000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000f80007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xb); ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff0100ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffeffff; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x478b478b38031779; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6b769e690fa1e119; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001030103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0047004700380017; ++ *((unsigned long *)&__m128i_result[0]) = 0x006bff9e0010ffe2; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128i_result[0]) = 0xff76ffd8ffe6ffaa; ++ __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001f5400000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffd70b00006ea9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffa352ffff9269; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffd70b00006ea9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffa352ffff9269; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0x8144ffff01c820a4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9b2ee1a4034b4e34; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff80c400000148; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff80c1ffffe8de; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe; ++ __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa486c90f6537b8d7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x58bcc2013ea1cc1e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffa486c90f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000058bcc201; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001802041b0014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003004; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff02000200; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffdfff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffdfff; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fbf83468; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fbf83468; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff82bb9784; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc6bb97ac; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007ffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001000fbff9; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x000000002ff9afef; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000004f804f81; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000004f804f80; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe00029f9f6061; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x64e464e464e464e4; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffeffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000064e264e6; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0305030203020502; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0301030203020502; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000003050302; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000003010302; ++ __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000ff0000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00a6ffceffb60052; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff84fff4ff84fff4; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fefefe6a; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5a57bacbd7e39680; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6bae051ffed76001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf3e6586b60d7b152; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf7077b934ac0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4e3e133738bb47d2; ++ __m128i_out = __lsx_vaddwod_q_d 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000117d00007f7b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000093d0000187f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7d7f027f7c7f7c79; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7e7f7e7f027f032f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7d7f13fc7c7ffbf4; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c +new file mode 100644 +index 000000000..379517f39 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x08fdc221bfdb1927; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4303c67e9b7fb213; ++ *((unsigned long *)&__m128i_op1[1]) = 0x08fdc221bfdb1927; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4303c67e9b7fb213; ++ *((unsigned long *)&__m128i_result[1]) = 0x00100184017e0032; ++ *((unsigned long *)&__m128i_result[0]) = 0x0086018c01360164; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff77777807777775; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe6eeef00eeeeeebf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f00f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff007700070077; ++ *((unsigned long *)&__m128i_result[0]) = 0x00e600ef00ee01de; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4429146a7b4c88b2; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe22b3595efa4aa0c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000442900007b4c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000e22b0000efa4; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000600000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000636500006363; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080800000808; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001fefc; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001fefc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff8000010f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff8000010f78; ++ __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffc01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffc01; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f000400000003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f000400000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000400004; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000003f0004; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000017f800001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000017f800001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007f800001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007f800001; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x379674c000000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c +new file mode 100644 +index 000000000..30dc83518 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c +@@ -0,0 +1,237 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000a16316b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x16161616a16316b0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ffffa10016; ++ *((unsigned long *)&__m128i_result[0]) = 0x01150115ffa10016; ++ __m128i_out = 
__lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007e007e007e007e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000200020; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000003f; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000fe00fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe00fe00fe00fe; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000011ffee; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000dfff2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00e0000000e00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000e0000000e0; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff7100fffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ffffa10016; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01150115ffa10016; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100fe000070a1; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000115ffffffa1; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0000fffe; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000001000f00fe00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000017fff00fe7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff00; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04faf60009f5f092; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04fafa9200000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000004fa000009f5; ++ *((unsigned long *)&__m128i_result[0]) = 0x000004f3fffffff9; ++ __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000c2f90000bafa; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001fff00001fff; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x00000807bf0a1f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000800ecedee68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128i_op1[0]) = 0x110053f401e7cced; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x5847bf2de5d8816f; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000155; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c +new file mode 100644 +index 000000000..1597749b5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c +@@ -0,0 +1,159 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000; ++ *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000003dffc2; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000003dffc2; ++ *((unsigned long*)& 
__m128i_op1[1]) = 0x0008000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x67eb85af0000b000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000; ++ *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c +new file mode 100644 +index 000000000..906da69ca +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c +@@ -0,0 +1,67 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x36); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x39); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x27); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x25); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0xbd); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000a95afc60a5c5; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000b6e414157f84; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000204264602444; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000266404046604; ++ __m128i_out = __lsx_vandi_b(__m128i_op0,0x66); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c +new file mode 100644 +index 000000000..3ae2d7694 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c +@@ -0,0 +1,129 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084; ++ *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000210011084; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29; ++ *((unsigned long*)& __m128i_result[1]) = 
0x00000049000000c0; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff29; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x010f00000111fffc; ++ *((unsigned long*)& __m128i_op0[0]) = 0x016700dc0176003a; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003000000010000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0002000000010000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffff000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c +new file mode 100644 +index 000000000..2177ca3f6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4050000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x2028000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000014155445; ++ *((unsigned long *)&__m128i_result[1]) = 0x33f5c2d7d9f5d800; ++ *((unsigned long *)&__m128i_result[0]) = 0xe4c23ffb002a3a22; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x00000000000f000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000ffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x003fffff00070007; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000007ffff; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400028000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000020001c020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000022; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x08080807f5f5f5f8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x04040403fafafafc; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff80; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f8000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001000010f8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x087c000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000087c; ++ __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5641127843c0d41e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfedb27095b6bff95; ++ *((unsigned long *)&__m128i_op1[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0024d8f6a494006a; ++ *((unsigned long *)&__m128i_result[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff7fffffffffffff; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00007fff; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0010ff06; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf6fd377cf705f680; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0000000bfff8000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x1ff800000000477f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000015fec9b0; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000037; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003fffff00000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000008000; ++ __m128i_out = 
__lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31; ++ *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xeb504f33155a3598; ++ *((unsigned long *)&__m128i_result[0]) = 0x1a5c0917fa02a5d9; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffefffff784; ++ *((unsigned long *)&__m128i_result[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff008ff820; ++ __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0014; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000c01020d8009; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fff8000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001008100000005; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe00fe8980000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff007e8a7ffc7e00; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff46000000ba; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffa30000005c; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000070007; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000007ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000068; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000038003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000040033; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff0000ac26; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80005613; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f800000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000040000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000040000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fc000005fc00000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc000005fc00000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff; ++ __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c +new file mode 100644 +index 000000000..1b0d879e4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c +@@ -0,0 +1,308 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000100000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37b951002d81a921; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000047404f4f040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000082000000826; ++ *((unsigned long *)&__m128i_result[0]) = 0x1b5c4c203e685617; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_result[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_result[0]) = 0x00a975be00accf03; ++ __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff732a; ++ *((unsigned long *)&__m128i_result[1]) = 0x807f7fff807f807f; ++ *((unsigned long *)&__m128i_result[0]) = 0x807f807f7fff3995; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7ff8; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x353c8cc4b1ec5b09; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080008000808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x1a9e466258f62d84; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ac; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x4e4e4e4e00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000868686868686; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1e1e1e1e1e1e1e1e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e1e1e1e1e1e1e1e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0f0f0f0f0f0f0f0f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f0f525252525252; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a753500950fa306; ++ __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff00010000fff; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000002ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000017fffffff; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101030100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080800000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080818000008000; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0017004800c400f9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ed001a00580070; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x800b7fe38062007b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0076800d802c0037; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff76ffd8ffe6ffaa; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xe01ae8a3fc55dd23; ++ *((unsigned long *)&__m128i_result[0]) = 0xdd9ff64ef9daeace; ++ __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fffffff; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f80000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x1fc0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1fc07f8000007f80; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000043cf26c7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000e31d4cae8636; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000021e79364; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000718ea657431b; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ff8000000000000; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff80ffff7e02; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf931fd04f832fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0x80007fc000003f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x7d187e427c993f80; ++ __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c +new file mode 100644 +index 000000000..4b7262537 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c +@@ -0,0 +1,299 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0040000000ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020c00000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18; ++ *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xdcfe1b20f2f60e0c; ++ *((unsigned long *)&__m128i_result[0]) = 0xc00000002e260e0c; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x060808ff08080820; ++ *((unsigned long *)&__m128i_result[0]) = 0x4608081808080810; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ac26; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000003000000d613; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c0000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff2; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff2; ++ __m128i_out = __lsx_vavgr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000002a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000003a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000015; ++ __m128i_out = __lsx_vavgr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc002000000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc002000000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000007fff0018; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003fff800c; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0280000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7500000075000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7500000075000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3bc000003a800000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007d1800007c99; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0a0000001e000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a621b3ebe5e1c02; ++ *((unsigned long *)&__m128i_result[1]) = 0x04ffc0000f000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x05314c2bdf2f4c4e; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000; ++ __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc000003fc00000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fffffffc0000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xff807f807f807f80; ++ *((unsigned long *)&__m128i_result[0]) = 0xff807f807f807f80; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000280000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000140001; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe00fe0045; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f007f007f007e; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f007f007effc6; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long *)&__m128i_result[0]) = 0xe4423f7b769f8ffe; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff9dff9dff9dff9d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffceffceffcf1fcb; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x1d4000001d400000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1e5f007f5d400000; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000007f80; ++ __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c +new file mode 100644 +index 000000000..22908b1ea +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c +@@ -0,0 +1,317 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff01018888; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x4080808080808080; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000003f; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c0c8b8a8b8b0b0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x8b8a8a898a8a8909; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000208000002080; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffd60015; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80808080806b000b; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff81010102; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242071db; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa578; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0dee7779210b8ed; ++ *((unsigned long *)&__m128i_result[0]) = 0xf463dbabebb5d2bc; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400400004004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000015ff4a31; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2a7b7c9260f90ee2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1b1c6cdfd57f5736; ++ *((unsigned long *)&__m128i_result[1]) = 0x153e3e49307d0771; ++ *((unsigned long *)&__m128i_result[0]) = 0x0d8e36706ac02b9b; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xdd6156076967d8c9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e3ab5266375e71b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x6eb12b0634b46c67; ++ *((unsigned long *)&__m128i_result[0]) = 0x171d5a9531bb7390; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000090900000998; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007a8000000480; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000485000004cc; ++ __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffc00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc001fffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001ff800000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffe800e80000000; ++ __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff000001ffff9515; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fffa9ed; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000017fffca8b; ++ __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffdfffffff8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7ffffffc; ++ __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffeff98; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fff7fcc; ++ *((unsigned long *)&__m128i_result[0]) = 0x18a3188b9854187b; ++ __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001c88bf0; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x807fffff80800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x8003000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040ffffc0400004; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101000001000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000008000008080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080800000800080; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c +new file mode 100644 +index 000000000..411dcaa40 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c +@@ -0,0 +1,461 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00e0000000e00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000e0000000e0; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x19df307a5d04acbb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5ed032b06bde1ab6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x19de307a5d04acba; ++ *((unsigned long *)&__m128i_result[0]) = 0x5ed032b06bde1ab6; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_op1[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd83c8081ffff808f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xd82480697f678077; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000006597cc3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006595cc1d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffe0000fffe0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffe0000fffe0000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff7fc01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80000000fff6fc00; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffef800; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x23b57fa16d39f7c8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x161c0c363c200824; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000fefe00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fefe00000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0bd80bd80bd8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7ffffffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xdfffdfffdffffffe; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000037; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000036; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100000001007c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000010000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefa000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefa000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67b7cf643c9d636a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39d70e366f547977; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x66b34f643c9c626a; ++ *((unsigned long *)&__m128i_result[0]) = 0x38d60e366e547876; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020207f7f; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ef8000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ef8000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000077f97; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffeff7f0000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_result[0]) = 0x685670d27e00682a; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010001000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x5d7f5d007f6a007f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffefffe; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x207fffff22bd04fb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x207fffff22bd04fb; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000002000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000002000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x207fffff22bd04fa; ++ *((unsigned long *)&__m128i_result[0]) = 0x207fffff22bd04fa; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffefffe; ++ __m128i_out = 
__lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000b81c8382; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000077af9450; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007efe7f7f8000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000004ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c +new file mode 100644 +index 000000000..5d7d66e06 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c +@@ -0,0 +1,279 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200000; ++ *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x004200a000200000; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000efffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002ff5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc2cf2471e9b7d7a4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000027f5; ++ *((unsigned long *)&__m128i_result[0]) = 0xc2cf2471e9b7d7a4; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0x7404443064403aec; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000d6eefefc0498; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x64b680a2ae3af8ca; ++ *((unsigned long *)&__m128i_op0[0]) = 0x161c0c363c200826; ++ *((unsigned long *)&__m128i_result[1]) = 0x64b680a2ae3af8c8; ++ *((unsigned long *)&__m128i_result[0]) = 0x161c0c363c200824; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff807f807f807f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff807f807f807f80; ++ *((unsigned long *)&__m128i_result[1]) = 0xfb807b807b807b80; ++ *((unsigned long *)&__m128i_result[0]) = 0xfb807b807b807b80; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vbitclri_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfbffffffffffffff; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9941d1d5f4ba9d08; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x9941d155f43a9d08; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffbfffffffbf; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x03f1e3d28b1a8a1a; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffda6f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffe3d7; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefffffffeffda6f; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefffffffeffe3d7; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080638063; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080638063; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200008; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000200000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000200008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200000; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200000001; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xdfdfdfdfdfdfdfdf; ++ *((unsigned long *)&__m128i_result[0]) = 0xdfdfdfdfdfdfdfdf; ++ __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c +new file mode 100644 +index 000000000..ba4f4b6dc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c +@@ -0,0 +1,407 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1b71a083b3dec3cd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x373a13323b4cdbc1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0802010808400820; ++ *((unsigned long *)&__m128i_result[0]) = 0x8004080408100802; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000501000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010400100203; ++ *((unsigned long *)&__m128i_result[0]) = 0x0103010301020109; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbe6ed563; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd0b1ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9d519ee8d2d84f1d; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128i_result[0]) = 0xdffdbffeba6f5543; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x2002040404010420; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010180800101; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001fffe; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000003f803f4; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100100000; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x040004000400040d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0501050105010501; ++ *((unsigned long *)&__m128i_result[0]) = 0x050105010501050c; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010001fffe; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffffeffffffff; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0040000000400000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040000000400000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0141010101410101; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x65b780a3ae3bf8cb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x161d0c363c200826; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x65b780a2ae3bf8ca; ++ *((unsigned long *)&__m128i_result[0]) = 0x161d0c373c200827; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe01fe01fe01fe01; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003bfb4000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000021ffffffdf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000e60; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100400100200e68; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001021; ++ *((unsigned long *)&__m128i_result[1]) = 0x0108020410400208; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010102; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff0000ff86; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x010101fe0101fe87; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x343d8dc5b0ed5a08; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x353c8cc4b1ec5b09; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ffffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0038d800ff000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fffe00fffffe00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0137ffc9d7fe2801; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f00ff017fffff01; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001200100012001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffe7fffffff; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 
0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000010000000; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffdfffcfffdfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffcfffdfffc; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001ffff0101ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0103fefd0303fefd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0103fefd0103fefd; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002001000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000020000; ++ __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffefffe; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ce28f9c0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000004e06b0890; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefeeffef7fefe; ++ __m128i_out = 
__lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003ffffe00800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff810001ff810002; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f804000ff810001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff1affff01001fe0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d02834d70; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe1bfefe00011ee1; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe1bfe6c03824c60; ++ __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41945926d8000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00001e5410082727; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00107f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001001001000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x4195d926d8018000; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8100017f810001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8100017f810001; ++ __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x545501550001113a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xd45501550001113a; ++ __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c +new file mode 100644 +index 000000000..9739182cd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c +@@ -0,0 +1,336 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, 
unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000400000007004; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfeffffffffffffff; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000400040004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000400040004000; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007fff8000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001008100000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800080077ff8800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0801088108000805; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m128i_result[0]) = 0x0202020202020202; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe86ce7eb5e9ce950; ++ *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m128i_result[0]) = 0xec68e3ef5a98ed54; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000400000204010; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x04000400fbfffb02; ++ 
__m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000000100000; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x040004000400040d; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000004f804f81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000004f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000004fc04f80; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040004000400040; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m128i_result[0]) = 0xefefefefefefefef; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_result[0]) = 0x3918371635143312; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x61608654a2d4f6da; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff0800080008000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe160065422d476da; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x77c0401040004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x77c0401040004000; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_result[1]) = 
0x75c0404a4200403a; ++ *((unsigned long *)&__m128i_result[0]) = 0x75c03fd642003fc6; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808280808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808280808; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000100fffffeff; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0404050404040404; ++ *((unsigned long *)&__m128i_result[0]) = 0x0404050404040404; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfbfbfbfbfbfbfbf; ++ *((unsigned long *)&__m128i_result[0]) = 0xbfbfbfbfbfbfbfbf; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000040000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000040000000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000020000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2000200020002000; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x401fadf8fbfbfbfb; ++ *((unsigned long *)&__m128i_result[0]) = 0x1c1f2145fbfbfbfb; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffefff00001000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffefff00001000; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31; ++ *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3; ++ *((unsigned long *)&__m128i_result[1]) = 0xd6e09e262af46b71; ++ *((unsigned long *)&__m128i_result[0]) = 0x34f8126ef4454bf3; ++ __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000200008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200000; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7feff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcffbdfcfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcedfcf5fcfd; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000555889; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002580f01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010000000455889; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000002480f01; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x00060fbf02040fbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020fbf02000fbf; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x400000003fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_result[1]) = 0x00197f26cb658837; ++ *((unsigned long *)&__m128i_result[0]) = 0x01009aa4a301084b; ++ __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000c6c60000c6c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000c6c58000c6b2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000c6c40000c6c6; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000c6c78000c6b2; ++ __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff7fffffff7f; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff7fffffff7f; ++ __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c +new file mode 100644 +index 000000000..52ac9939f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c +@@ -0,0 +1,109 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000007fff; ++ *((unsigned long 
*)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0505000005050505; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000d02540000007e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001400140014; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0505050505050505; ++ *((unsigned long *)&__m128i_op2[0]) = 0x03574e38e496cbc9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0400001001150404; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080001300000013; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x62cbf84c02cbac00; ++ *((unsigned long *)&__m128i_result[0]) = 0x1014120210280240; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff59; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff59; ++ __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c +new file mode 100644 +index 000000000..f2d6fb042 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c +@@ -0,0 +1,84 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6664666466646664; ++ *((unsigned long *)&__m128i_result[0]) = 0x6664666466646664; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x5d5d5d5d5d5d5d55; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x5d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x5959595959595959; ++
*((unsigned long *)&__m128i_result[0]) = 0x5959595959595959; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x59); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0xaa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0b4c600000000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004280808080808; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0xa4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_result[0]) = 0x000047404f4f040d; ++ __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x4f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c +new file mode 100644 +index 000000000..e05af675e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c +@@ -0,0 +1,371 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++
__m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe001ffffe001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe001ffffe001; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000038335ca2777; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800800000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf51df8dbd6050189; ++ *((unsigned long *)&__m128i_result[0]) = 0x0983e2dbf235ed87; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d5d55; ++ *((unsigned long *)&__m128i_result[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe00fcfffe21fd01; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x80000000fff7fc01; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe00000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff01010105; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001c00ffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010201808040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010280808040; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000100; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff994cb09c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc3639d96; ++ *((unsigned long *)&__m128i_op1[1]) = 0x20de27761210386d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x34632935195a123c; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff994db09c; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc7639d96; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000545cab1d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000081a83bea; ++ *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_result[1]) = 0x00400000547cab1d; ++ *((unsigned long *)&__m128i_result[0]) = 0x2000000081a83fea; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000038003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040033; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100080000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0909090909090909; ++ *((unsigned long *)&__m128i_result[0]) = 0x0909090909090909; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00a600e000a600e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01500178010000f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfefbff06fffa0004; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xfefeff04fffd0004; ++ *((unsigned long *)&__m128i_result[1]) = 0x4008804080040110; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040801080200110; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x8101010181010101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101030101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101030101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xd78cfd70b5f65d77; ++ *((unsigned long *)&__m128i_result[0]) = 0x5779108fdedda7e5; ++ __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00004a1e00004a1e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000100200001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000200020002; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff80ffff7e02; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0280000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff81ffff7f03; ++ *((unsigned long *)&__m128i_result[0]) = 0x04ffff8101ff81ff; ++ __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4480000044800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x45c0000044800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x4481000144810001; ++ *((unsigned long *)&__m128i_result[0]) = 0x45c04000c4808000; ++ __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x3a8100013a810001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7bc04000ba808000; ++ __m128i_out = 
__lsx_vbitset_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000cecd00004657; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000c90000011197; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000200000800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100800000; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8000017f800001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000017f800001; ++ __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c +new file mode 100644 +index 000000000..540a724a7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c +@@ -0,0 +1,279 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020002000200020; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040000000ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040000000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x54beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8024d8f6a494afcb; ++ *((unsigned long *)&__m128i_result[1]) = 0x54feed87bc3f2be1; ++ *((unsigned long *)&__m128i_result[0]) = 0x8064d8f6a494afcb; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000c400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x001000100010c410; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned
long *)&__m128i_op0[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x3e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_result[0]) = 0x3b2c8aefd44be966; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040004017fda869; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x800000ff080000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000010000; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000000040000; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_result[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_result[0]) = 0x0982eadaf234ed87; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000006; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vbitseti_d 
(__m128i_op0, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe5e5e5e5e5e5e5e5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe5e5e5e5e4e4e46d; ++ *((unsigned long *)&__m128i_result[1]) = 0xe5e5e5e5e5e5e5e5; ++ *((unsigned long *)&__m128i_result[0]) = 0xe5e5e5e5e4e4e46d; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800080008000800; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100000001000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020207fff; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000900000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000900013fa0; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ff0008000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x40f3fa8000800080; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000040000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m128i_result[0]) = 0xc404040404040404; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000040804000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000040804000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000040a04000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f81e3779b97f4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff02000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f81e3779b97f4a8; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100010001000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100010001000101; ++ __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000010000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002711250a27112; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00d2701294027112; ++ *((unsigned long *)&__m128i_result[1]) = 0x080a791a58aa791a; ++ *((unsigned long *)&__m128i_result[0]) = 0x08da781a9c0a791a; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0303030303030303; 
++ *((unsigned long *)&__m128i_result[1]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_result[0]) = 0x1313131313131313; ++ __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000000; ++ __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff0008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff0008000000080; ++ __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c +new file mode 100644 +index 000000000..34246c551 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffffff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff000000ff00; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long
*)&__m128i_result[1]) = 0x0a00000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001580000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c +new file mode 100644 +index 000000000..986b7d566 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c +@@ -0,0 +1,55 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++
*((unsigned long *)&__m128i_result[0]) = 0x003fffffff000000; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0005fe0300010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe03000101010000; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000d3259a; ++ __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c +new file mode 100644 +index 000000000..2c1099a04 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c +@@ -0,0 +1,266 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++
__m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe2ecd48adedc7c82; ++ *((unsigned long *)&__m128i_op0[0]) = 0x25d666472b01d18d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303020102020001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000000201; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000007070700; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002010202; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000007e8a60; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001edde; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcd1de80217374041; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000001fffff59; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000aaaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe500ffffc085; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000012; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001200000012; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000a00000009; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x413e276583869d79; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f017f9d8726d3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc090380000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000200000000d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fec20704; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclo_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000200000001c; ++ __m128i_out = __lsx_vclo_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c +new file mode 100644 +index 000000000..12df2c670 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c +@@ -0,0 +1,265 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000800100008; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000001fc1a568; ++ *((unsigned long *)&__m128i_op0[0]) = 0x02693fe0e7beb077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f7f000b000b000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000b000b010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128i_result[0]) = 0x0804080407040804; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf0bd80bd80bd8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010000fe7c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long *)&__m128i_result[1]) = 0x000f000f00100000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000f000f00100000; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100000008080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000039; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000039; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000f00080008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000a00080008; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000bffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x687a8373f249bc44; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7861145d9241a14a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101030100010001; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vclz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080700000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000f0000000f; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000008000001e; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000200000001b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000000; ++ __m128i_out = __lsx_vclz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080805; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080805; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vclz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000000000; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vclz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c +new file mode 100644 +index 000000000..cb4be0475 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c +@@ -0,0 +1,299 @@ ++/* { dg-do run } */ ++/* { 
dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xc110000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc00d060000000000; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101000101010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000fe0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00ffffff00ff; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010100000000; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff9727ffff9727; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffe79ffffba5f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x010169d9010169d9; ++ *((unsigned long *)&__m128i_result[0]) = 0x01010287010146a1; ++ __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80010001b57fc565; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8001000184000be0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000080001fffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff9cf0d77b; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc1000082b0fb585b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff010000ff01; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363abdf16; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000fffc00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00001ff800000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffe800e80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6a1a3fbb3c90260e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xe6a0cf86a2fb5345; ++ *((unsigned long *)&__m128i_result[0]) = 0x95e5c045c36fd9f2; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2e3a36363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa2e3a36463636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f80000000000007; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000700000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000e32c50e; ++ *((unsigned long *)&__m128i_result[0]) = 0xf2b2ce330e32c50e; ++ __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000001; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001084314a6; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001084314a6; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000ffef0010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000010000010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4280000042800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xbd7fffffbd800000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op1[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000004ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c +new file mode 100644 +index 000000000..f2bc7df27 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c +@@ -0,0 +1,254 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x31b1777777777776; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6eee282828282829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0effeffefdffa1e0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe6004c5f64284224; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000002a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000003f200001e01; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x000014bf000019da; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c99aed5b88fcf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7c3650c5f79a61a3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080800008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffd700; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffbfff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0080006b0000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000001ff1745745c; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff14eb54ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x14ea6a002a406a00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80008a7555aa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a7535006af05cf9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfebffefffebffeff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfebffefffebffeff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63996399; ++ *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363797c63996399; ++ *((unsigned long *)&__m128i_op1[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000036de0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003be14000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000007e8a60; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000001edde; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000feff2356; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fd165486; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002427c2ee; ++ __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c +new file mode 100644 +index 000000000..f6390800d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c +@@ -0,0 +1,342 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f909; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1010111105050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4040000041410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000110011; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005000500000000; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000003ffe2; ++ __m128i_out = __lsx_vexth_h_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x000003c000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18; ++ *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffb9fe00003640; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe4eb00001b18; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfec00130014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfec00130014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000370bffffdfec; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000014; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe500c085c000c005; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe5c1a185c48004c5; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe500ffffc085; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc000ffffc005; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_w_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000005c9c9c9c; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffe3636363; ++ __m128i_out = __lsx_vexth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63b2ac27aa076aeb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000063b2ac27; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffaa076aeb; ++ __m128i_out = __lsx_vexth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000002a001a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001a000b; ++ __m128i_out = __lsx_vexth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x012927ffff272800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0028280000000000; ++ __m128i_out = __lsx_vexth_q_d 
(__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000020000020; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000080; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3a8000003a800000; ++ __m128i_out = __lsx_vexth_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c +new file mode 100644 +index 000000000..6ab217e97 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c +@@ -0,0 +1,182 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x004f0080004f0080; ++ *((unsigned long *)&__m128i_result[0]) = 0x004f0080004f0080; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff0000007f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x5); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x002cffacffacffab; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000007f00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vexth_hu_bu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000082020201; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000820200000201; ++ __m128i_out = __lsx_vexth_wu_hu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fec20704; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000012; ++ __m128i_out = __lsx_vexth_wu_hu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vexth_du_wu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_du_wu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_du_wu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) 
= 0x0000000000000000; ++ __m128i_out = __lsx_vexth_du_wu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000001; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000020000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000b4a00008808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080800000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000b4a00008808; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000400080003fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bc2000007e10; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000400080003fff; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vexth_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c +new file mode 100644 +index 000000000..99854dbd8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 
__m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000170014; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe500ffffc085; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc000ffffc005; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3131313131313131; 
++ __m128i_out = __lsx_vextl_q_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c +new file mode 100644 +index 000000000..73bb530c9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000101fffff8b68; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000b6fffff8095; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000b6fffff8095; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010000fe7c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010000fe01; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000d82; ++ *((unsigned long *)&__m128i_op0[0]) = 0x046a09ec009c0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x046a09ec009c0000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextl_qu_du (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c +new file mode 100644 +index 000000000..8d4158b57 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c +@@ -0,0 +1,479 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x92); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0200020002000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0200020002000200; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff02000200; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be55700b5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_result[0]) = 0x342caf9bffff1fff; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xcc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000a16316b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000063636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x16161616a16316b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000a16316b0; ++ *((unsigned long *)&__m128i_result[0]) = 0x16161616a16316b0; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xa7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc45a851c40c18; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x48); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xcc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x41); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ffffffeffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffffeffffffff; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xe6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000a0000000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000a00000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a0000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xaf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x67); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x004f1fcfd01f9f9f; ++ *((unsigned long *)&__m128i_result[0]) = 0x9f4fcfcfcf800000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xda); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x75b043c4d17db125; ++ *((unsigned long *)&__m128i_op0[0]) = 0xeef8227b596117b1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x75b043c4d17db125; ++ *((unsigned long *)&__m128i_result[0]) = 0xeef8227b4f8017b1; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000de32400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0; ++ 
__m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x77); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63996399; ++ *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363797c63990099; ++ *((unsigned long *)&__m128i_result[0]) = 0x171f0a1f6376441f; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x94); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0bd80bd80bd80000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xf9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x41dfbe1f41e0ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x41dfbe1f41e0ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xec); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128i_result[1]) = 0x5237c1baffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x7d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffbd994889; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000a092444; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000890000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x58); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff8607db959f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xc2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01ef013f01e701f8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x35bb8d32b2625c00; ++ *((unsigned long *)&__m128i_result[1]) = 0x00008d3200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 
0xea); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8003000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4040ffffc0400004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8003000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x74); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000001ffff9515; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x67); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xf4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x71); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x82); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xd5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xf3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbbe5560400010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe7e5dabf00010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbbe5560400010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe7e5dabf00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xe7e5560400010001; ++ *((unsigned long *)&__m128i_result[0]) = 0xe7e5dabf00010001; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0xf3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x5d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xb6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x975ca6046e2e4889; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1748c4f9ed1a5870; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffc606ec5; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000014155445; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x76); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000024170000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000034; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01017f3c00000148; ++ *((unsigned long *)&__m128i_op1[0]) = 0x117d7f7b093d187f; ++ *((unsigned long *)&__m128i_result[1]) = 0x117d7f7b093d187f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x70); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe519ab7e71e33848; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffab7e71e33848; ++ __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0xbc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff760386bdae46; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc1fc7941bc7e00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff7603; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xc3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000003b0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff2356fe165486; ++ __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x70); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x8a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c +new file mode 100644 +index 000000000..7ffbd385e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c +@@ -0,0 +1,407 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000fea8ff44; ++ *((unsigned long *)&__m128d_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128d_op1[0]) = 0x2020202020202020; ++ *((unsigned long *)&__m128d_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128d_result[0]) = 0x2020202020202020; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128d_result[0]) = 0x1000100010001000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x000000000000000f; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000010100fe0101; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff0200ffff01ff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0001010100fe0100; ++ *((unsigned long *)&__m128d_result[0]) = 0xffff0200ffff01ff; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fffffffa0204000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7f370101ff04ffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7f3bffffa0226021; ++ *((unsigned long *)&__m128d_result[1]) = 
0x7fff0101ffffe000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7fffffffa0204000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128d_op1[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128d_op1[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128d_result[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128d_result[0]) = 0x27b1b106b8145f50; ++ __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000100000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x1000100000001000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000100000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x1000100000001000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d 
(__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000007000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128d_op1[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffab7e71e33848; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128d_result[1]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128d_result[1]) = 0x80000000fff8fff8; ++ *((unsigned long *)&__m128d_result[0]) = 0x80000000fff80000; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0xb55ccf30f52a6a68; ++ *((unsigned long *)&__m128d_op1[0]) = 0x4e0018eceb82c53a; ++ *((unsigned long *)&__m128d_result[1]) = 0x355ccf30f52a6a68; ++ *((unsigned long *)&__m128d_result[0]) = 0xce0018eceb82c53a; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0xffffffff00006c82; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00009b140000917b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffff00006c82; ++ *((unsigned long *)&__m128d_result[0]) = 0x00009b140000917b; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000100000020; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000083b00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128d_op0[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128d_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long *)&__m128d_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long *)&__m128d_result[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_result[0]) = 0x65017c2ac9ca9fd0; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_op0[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00008bf700017052; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000f841000091aa; ++ *((unsigned long *)&__m128d_result[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_result[0]) = 0x65017c2ac9ca9fd0; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000004000000002; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x5555410154551515; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0004455501500540; ++ *((unsigned long *)&__m128d_result[1]) = 0xd555410154551515; ++ *((unsigned long *)&__m128d_result[0]) = 0x8004455501500540; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0003000300a10003; ++ *((unsigned long 
*)&__m128d_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0003000300000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0003000300a10003; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128d_op1[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long *)&__m128d_result[1]) = 0xd6a09e662ab46b31; ++ *((unsigned long *)&__m128d_result[0]) = 0x34b8122ef4054bb3; ++ __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7f4000007f040000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7f0200007f020000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffff01018888; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000100007f01; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0400000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000ff801c9e; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000810000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x40eff02383e383e4; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000cd630000cd63; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffff00000000ffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128d_op1[0]) = 0x03aa558e1d37b5a1; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffefffe011df03e; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffffffefffffffe; ++ __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c +new file mode 100644 +index 000000000..388430278 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c +@@ -0,0 +1,470 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ 
++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x05050505; ++ *((int *)&__m128_op0[2]) = 0x05050505; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x05050000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x03574e38; ++ *((int *)&__m128_op1[0]) = 0xe496cbc9; ++ *((int *)&__m128_result[3]) = 0x05050505; ++ *((int *)&__m128_result[2]) = 0x05050505; ++ *((int *)&__m128_result[1]) = 0x03574e38; ++ *((int *)&__m128_result[0]) = 0xe496cbc9; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000000f; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00077f88; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00077f97; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x000000ff; ++ *((int *)&__m128_op0[0]) = 0x000000ff; ++ *((int *)&__m128_op1[3]) = 0x370bdfec; ++ *((int *)&__m128_op1[2]) = 0xffecffec; ++ *((int *)&__m128_op1[1]) = 0x370bdfec; ++ *((int *)&__m128_op1[0]) = 0xffecffec; ++ *((int *)&__m128_result[3]) = 0x370bdfec; ++ *((int *)&__m128_result[2]) = 0xffecffec; 
++ *((int *)&__m128_result[1]) = 0x370bdfec; ++ *((int *)&__m128_result[0]) = 0xffecffec; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ff00; ++ *((int *)&__m128_op1[0]) = 0x00ff0000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffff0000; ++ *((int *)&__m128_op0[2]) = 0xffff0000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x40088040; ++ *((int *)&__m128_op1[2]) = 0x80040110; ++ *((int *)&__m128_op1[1]) = 0x40408010; ++ *((int *)&__m128_op1[0]) = 0x80200110; ++ *((int *)&__m128_result[3]) = 0xffff0000; ++ *((int *)&__m128_result[2]) = 0xffff0000; ++ *((int *)&__m128_result[1]) = 0x40408010; ++ *((int *)&__m128_result[0]) = 0x80200110; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xfffffffc; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xfffffffc; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xfffffffc; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xfffffffc; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000001b; ++ *((int *)&__m128_op0[2]) = 0x0000001b; ++ *((int *)&__m128_op0[1]) = 0x0000001b; ++ *((int *)&__m128_op0[0]) = 0x0000001b; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x0000001b; ++ *((int *)&__m128_result[2]) = 0x0000001b; ++ *((int *)&__m128_result[1]) = 0x0000001b; ++ *((int *)&__m128_result[0]) = 0x0000001b; ++ __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x56411278; ++ *((int *)&__m128_op0[2]) = 0x43c0d41e; ++ *((int *)&__m128_op0[1]) = 0x0124d8f6; ++ *((int *)&__m128_op0[0]) = 0xa494006b; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 
0x05010501; ++ *((int *)&__m128_op1[2]) = 0x05010501; ++ *((int *)&__m128_op1[1]) = 0x05010501; ++ *((int *)&__m128_op1[0]) = 0x0501050c; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x21f32eaf; ++ *((int *)&__m128_op0[2]) = 0x5b7a02c8; ++ *((int *)&__m128_op0[1]) = 0x407c2ca3; ++ *((int *)&__m128_op0[0]) = 0x2cbd0357; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00010400; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffe0001; ++ *((int *)&__m128_op0[2]) = 0xfffe0001; ++ *((int *)&__m128_op0[1]) = 0xfffe0001; ++ *((int *)&__m128_op0[0]) = 0xfffe0001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xfffe0001; ++ *((int *)&__m128_result[2]) = 0xfffe0001; ++ *((int *)&__m128_result[1]) = 0xfffe0001; ++ *((int *)&__m128_result[0]) = 0xfffe0001; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00002ebf; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x01000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00081f1f; ++ *((int *)&__m128_op0[2]) = 0x1f1f1f1f; ++ *((int *)&__m128_op0[1]) = 0x1f1f1f1f; ++ *((int *)&__m128_op0[0]) = 0x1f1f1f1f; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, 
__m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x021b7d24; ++ *((int *)&__m128_op0[2]) = 0x49678a35; ++ *((int *)&__m128_op0[1]) = 0x030298a6; ++ *((int *)&__m128_op0[0]) = 0x21030a49; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000002; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf6548a17; ++ *((int *)&__m128_op0[2]) = 0x47e59090; ++ *((int *)&__m128_op0[1]) = 0x27b169bb; ++ *((int *)&__m128_op0[0]) = 0xb8145f50; ++ *((int *)&__m128_op1[3]) = 0x004eff62; ++ *((int *)&__m128_op1[2]) = 0x00d2ff76; ++ *((int *)&__m128_op1[1]) = 0xff700028; ++ *((int *)&__m128_op1[0]) = 0x00be00a0; ++ *((int *)&__m128_result[3]) = 0xb7032c34; ++ *((int *)&__m128_result[2]) = 0x093d35ab; ++ *((int *)&__m128_result[1]) = 0xe7a6533b; ++ *((int *)&__m128_result[0]) = 0x800001b8; ++ __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fff0101; ++ *((int *)&__m128_op0[2]) = 0x81010102; ++ *((int *)&__m128_op0[1]) = 0x7fffffff; ++ *((int *)&__m128_op0[0]) = 0x81010102; ++ *((int *)&__m128_op1[3]) = 0x00000fff; ++ *((int *)&__m128_op1[2]) = 0xffffe000; ++ *((int *)&__m128_op1[1]) = 0x00001020; ++ *((int *)&__m128_op1[0]) = 0x20204000; ++ *((int *)&__m128_result[3]) = 0x7fff0101; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0x7fffffff; ++ *((int *)&__m128_result[0]) = 0xa0204000; ++ __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000fff; ++ *((int *)&__m128_op1[2]) = 0xffffe000; ++ *((int *)&__m128_op1[1]) = 0x00001020; ++ *((int *)&__m128_op1[0]) = 0x20204000; ++ *((int *)&__m128_result[3]) = 
0x80000fff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x80001020; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x54feed87; ++ *((int *)&__m128_op0[2]) = 0xbc3f2be1; ++ *((int *)&__m128_op0[1]) = 0x8064d8f6; ++ *((int *)&__m128_op0[0]) = 0xa494afcb; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xd8248069; ++ *((int *)&__m128_op0[0]) = 0x7f678077; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xd8248069; ++ *((int *)&__m128_op1[0]) = 0x7f678077; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x3f800000; ++ *((int *)&__m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00070000; ++ *((int *)&__m128_op0[2]) = 0x00040000; ++ *((int *)&__m128_op0[1]) = 0x00030000; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((int *)&__m128_op1[3]) = 0x00070000; ++ *((int *)&__m128_op1[2]) = 0x00040000; ++ *((int *)&__m128_op1[1]) = 0x00030000; ++ *((int *)&__m128_op1[0]) = 0x00010000; ++ *((int *)&__m128_result[3]) = 0x3f800000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x3f800000; ++ *((int *)&__m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int 
*)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00010001; ++ *((int *)&__m128_op1[2]) = 0x0001007c; ++ *((int *)&__m128_op1[1]) = 0x00010001; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00001fff; ++ *((int *)&__m128_op0[2]) = 0x00001fff; ++ *((int *)&__m128_op0[1]) = 0x00000003; ++ *((int *)&__m128_op0[0]) = 0xfffffffc; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0xfffffffc; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c +new file mode 100644 +index 000000000..9706d7adc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c +@@ -0,0 +1,83 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000080; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vfclass_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c +new file mode 100644 +index 000000000..7166f954b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c +@@ -0,0 +1,74 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x7fff8000; ++ *((int *)&__m128_op0[1]) = 0x00010081; ++ *((int 
*)&__m128_op0[0]) = 0x00000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000020000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfe02fe02; ++ *((int *)&__m128_op0[2]) = 0xfe02fe02; ++ *((int *)&__m128_op0[1]) = 0xfe02fe02; ++ *((int *)&__m128_op0[0]) = 0xfe02fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000000c; ++ *((int *)&__m128_op0[2]) = 0x7fff000c; ++ *((int *)&__m128_op0[1]) = 0x10001000; ++ *((int *)&__m128_op0[0]) = 0x10001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000020000000200; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000020000000200; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0c0b0a09; ++ *((int *)&__m128_op0[2]) = 0x0b0a0908; ++ *((int *)&__m128_op0[1]) = 0x0a090807; ++ *((int *)&__m128_op0[0]) = 0x09080706; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vfclass_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c +new file mode 100644 +index 000000000..b448c2076 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c +@@ -0,0 +1,244 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01010101; ++ *((int *)&__m128_op0[0]) = 0x01010101; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int 
*)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7ef400ad; ++ *((int *)&__m128_op0[2]) = 0x21fc7081; ++ *((int *)&__m128_op0[1]) = 0x28bf0351; ++ *((int *)&__m128_op0[0]) = 0xec69b5f2; ++ *((int *)&__m128_op1[3]) = 0xff800000; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0xff800000; ++ *((int *)&__m128_op1[0]) = 0x7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01000100; ++ *((int *)&__m128_op0[0]) = 0x01000100; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x64e464e4; ++ *((int *)&__m128_op1[0]) = 0x64e464e4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffc0ff80; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xc0800000; ++ *((int *)&__m128_op1[3]) = 0x0000001b; ++ *((int *)&__m128_op1[2]) = 0x0000001b; ++ *((int *)&__m128_op1[1]) = 0x0000001b; ++ *((int 
*)&__m128_op1[0]) = 0x0000001b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000002; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000002; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x34500292; ++ *((int *)&__m128_op1[0]) = 0x0f3017d6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00830029; ++ *((int *)&__m128_op0[0]) = 0x0038ff50; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff7fff80; ++ *((int *)&__m128_op0[2]) = 0xff800001; ++ *((int *)&__m128_op0[1]) = 0xe593d844; ++ *((int *)&__m128_op0[0]) = 0xe593c8c4; ++ *((int *)&__m128_op1[3]) = 0xff800000; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0xe593c8c4; ++ *((int *)&__m128_op1[0]) = 0xe593c8c4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x8a8a8a8a; ++ *((int *)&__m128_op1[2]) = 0x8a8a8a8a; ++ *((int *)&__m128_op1[1]) = 0x8a8a8a8a; ++ *((int *)&__m128_op1[0]) = 0x8a8a8a8a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x66b34f643c9c626a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x38d60e366e547876; ++ *((unsigned long *)&__m128d_op1[1]) = 0x66b34f643c9c626a; ++ *((unsigned long *)&__m128d_op1[0]) = 0x38d60e366e547876; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000700000004fdff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000300000000fdff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xf2f444429d96dbe1; ++ *((unsigned long *)&__m128d_op0[0]) = 0xddd76c75f2f44442; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc1f03e1042208410; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffbfff7fffc000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff43dfffff81fb; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c +new file mode 100644 +index 000000000..98941b47d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c +@@ -0,0 +1,516 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00007f00; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x01000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 
0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x08fdc221; ++ *((int *)&__m128_op0[2]) = 0xbfdb1927; ++ *((int *)&__m128_op0[1]) = 0x4303c67e; ++ *((int *)&__m128_op0[0]) = 0x9b7fb213; ++ *((int *)&__m128_op1[3]) = 0x0000800c; ++ *((int *)&__m128_op1[2]) = 0x0004300c; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000800; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00007fff; ++ *((int *)&__m128_op0[2]) = 0x00007fff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x2bfd9461; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x2bfd9461; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 
0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01000000; ++ *((int *)&__m128_op0[0]) = 0x01000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xcd636363; ++ *((int *)&__m128_op1[2]) = 0xcd636363; ++ *((int *)&__m128_op1[1]) = 0xcd636363; ++ *((int *)&__m128_op1[0]) = 0xcd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x007fffff; ++ *((int *)&__m128_op0[1]) = 0x007fffff; ++ *((int *)&__m128_op0[0]) = 0xff800000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x0000cecd; ++ *((int *)&__m128_op1[2]) = 0x00004657; ++ *((int *)&__m128_op1[1]) = 0x0000c900; ++ *((int *)&__m128_op1[0]) = 0x00011197; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf51df8db; ++ *((int *)&__m128_op0[2]) = 0xd6050189; ++ *((int *)&__m128_op0[1]) = 0x0983e2db; ++ *((int *)&__m128_op0[0]) = 0xf235ed87; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x3ea5016b; ++ *((int *)&__m128_op1[1]) = 0xfffefffe; ++ *((int *)&__m128_op1[0]) = 
0x3f6fb04d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffa8ff9f; ++ *((int *)&__m128_op1[1]) = 0x0000ffff; ++ *((int *)&__m128_op1[0]) = 0xffabff99; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000ff00; ++ *((int *)&__m128_op1[3]) = 0x40404040; ++ *((int *)&__m128_op1[2]) = 0x40404040; ++ *((int *)&__m128_op1[1]) = 0x40404040; ++ *((int *)&__m128_op1[0]) = 0x40404040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x3bcc5098; ++ *((int *)&__m128_op1[2]) = 0x703fa5f0; ++ *((int *)&__m128_op1[1]) = 0xab7b3134; ++ *((int *)&__m128_op1[0]) = 0x9703f605; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x000000ff; ++ *((int *)&__m128_op0[0]) = 0xfe01fd02; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x0001fe01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x000000ff; ++ *((int *)&__m128_op0[0]) = 0xfe01fd02; ++ *((int *)&__m128_op1[3]) = 0x00000001; ++ *((int *)&__m128_op1[2]) = 0x00000100; ++ *((int *)&__m128_op1[1]) = 0x00000001; ++ *((int *)&__m128_op1[0]) = 0x00000100; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00800000; ++ *((int *)&__m128_op0[0]) = 0x00800000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00800000; ++ *((int *)&__m128_op1[0]) = 0x00800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xc2409edab019323f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0100000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 
0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128d_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128d_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000c000ffffc000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xd78cfd70b5f65d77; ++ *((unsigned long *)&__m128d_op1[0]) = 0x5779108fdedda7e5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000ff0000ffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c +new file mode 100644 +index 000000000..409bce0ec +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c +@@ -0,0 +1,530 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00001802; ++ *((int *)&__m128_op0[0]) = 0x041b0013; ++ *((int *)&__m128_op1[3]) = 0xff800000; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0xff800000; ++ *((int *)&__m128_op1[0]) = 0xc3080000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x17fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000404; ++ *((int *)&__m128_op1[2]) = 0x00000383; ++ *((int *)&__m128_op1[1]) = 0xffffe000; ++ *((int *)&__m128_op1[0]) = 0xffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000000fe; ++ *((int 
*)&__m128_op0[2]) = 0x808000ff; ++ *((int *)&__m128_op0[1]) = 0x000000fe; ++ *((int *)&__m128_op0[0]) = 0x808000fe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000020; ++ *((int *)&__m128_op0[2]) = 0x00000020; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ffc1; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000004; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xe0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x01010001; ++ *((int *)&__m128_op1[2]) = 0x00010001; ++ *((int *)&__m128_op1[1]) = 0x01010301; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffff00; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000001; ++ *((int *)&__m128_op1[2]) = 0x00000001; ++ *((int *)&__m128_op1[1]) = 0x00000001; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int 
*)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00060fbf; ++ *((int *)&__m128_op1[2]) = 0x02040fbf; ++ *((int *)&__m128_op1[1]) = 0x00020fbf; ++ *((int *)&__m128_op1[0]) = 0x02000fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0a752a55; ++ *((int *)&__m128_op0[1]) = 0x0a753500; ++ *((int *)&__m128_op0[0]) = 0x950fa306; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x380fdfdf; ++ *((int *)&__m128_op1[0]) = 0xc0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000007fff800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000080007f80800; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff80800001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffff80800001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x5f675e96a8d359f5; ++ *((unsigned long *)&__m128d_op0[0]) = 0x46387f95d9a68001; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x131211101211100f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x11100f0e100f0e0d; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d 
(__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000002a000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffff7f8c; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfcfcfcfcfcfc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128d_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000001; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s 
(__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x4f804f81; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x4f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fff0007; ++ *((int *)&__m128_op0[2]) = 0xe215b122; ++ *((int *)&__m128_op0[1]) = 0x7ffeffff; ++ *((int *)&__m128_op0[0]) = 0x7bfff828; ++ *((int *)&__m128_op1[3]) = 0x80010009; ++ *((int *)&__m128_op1[2]) = 0x816ac5de; ++ *((int *)&__m128_op1[1]) = 0x80010001; ++ *((int *)&__m128_op1[0]) = 0x84000bd8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xfefa0000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x9c9c9c9c; ++ *((int *)&__m128_op1[2]) = 0x9c9c9c9c; ++ *((int *)&__m128_op1[1]) = 0x9c9c9c9c; ++ *((int *)&__m128_op1[0]) = 0x9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0c0b0a09; ++ *((int *)&__m128_op0[2]) = 0x0b0a0908; ++ *((int *)&__m128_op0[1]) = 0x0a090807; ++ *((int *)&__m128_op0[0]) = 0x09080706; ++ *((int *)&__m128_op1[3]) = 0x0c0b0a09; ++ *((int *)&__m128_op1[2]) = 0x0b0a0908; ++ *((int *)&__m128_op1[1]) = 0x0a090807; ++ *((int *)&__m128_op1[0]) = 0x09080706; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000020; ++ *((int *)&__m128_op1[2]) = 0x00000020; ++ *((int *)&__m128_op1[1]) = 0x0000001f; ++ *((int *)&__m128_op1[0]) = 0x0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7ff80000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x7ff80000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff80ff0010ff06; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128d_op1[0]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c +new file mode 100644 +index 000000000..39c9cf7a7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c +@@ -0,0 +1,476 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x56411278; ++ *((int *)&__m128_op0[2]) = 0x43c0d41e; ++ *((int *)&__m128_op0[1]) = 0x0124d8f6; ++ *((int *)&__m128_op0[0]) = 0xa494006b; ++ *((int *)&__m128_op1[3]) = 0x7f800000; ++ *((int *)&__m128_op1[2]) = 0xff800000; ++ *((int *)&__m128_op1[1]) = 0xff800000; ++ *((int *)&__m128_op1[0]) = 0xff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x84939413; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000002; ++ *((int *)&__m128_op0[0]) = 0xbefcb21e; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 
0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xfffefff6; ++ *((int *)&__m128_op0[0]) = 0xfff80002; ++ *((int *)&__m128_op1[3]) = 0x000000c5; ++ *((int *)&__m128_op1[2]) = 0xac01015b; ++ *((int *)&__m128_op1[1]) = 0xaaacac88; ++ *((int *)&__m128_op1[0]) = 0xa3a9a96a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff84fff4; ++ *((int *)&__m128_op0[2]) = 0xff84fff4; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff0; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x11000f20; ++ *((int *)&__m128_op0[2]) = 0x10000e20; ++ *((int *)&__m128_op0[1]) = 0x0f000d20; ++ *((int *)&__m128_op0[0]) = 0x0e000c20; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000c00; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00bd003d; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000005; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000005; ++ *((int *)&__m128_op1[3]) = 
0xfffefffe; ++ *((int *)&__m128_op1[2]) = 0xfffefffe; ++ *((int *)&__m128_op1[1]) = 0xfffefffe; ++ *((int *)&__m128_op1[0]) = 0xfffefffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xff800001; ++ *((int *)&__m128_op0[0]) = 0x0f800000; ++ *((int *)&__m128_op1[3]) = 0x00000009; ++ *((int *)&__m128_op1[2]) = 0x00000009; ++ *((int *)&__m128_op1[1]) = 0xff80000a; ++ *((int *)&__m128_op1[0]) = 0x0f800009; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0x3b5eae24; ++ *((int *)&__m128_op0[0]) = 0xab7e3848; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00003f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63636363; ++ *((int *)&__m128_op0[2]) = 0x3f3e47c1; ++ *((int *)&__m128_op0[1]) = 0x41f8e080; ++ *((int *)&__m128_op0[0]) = 0xf1ef4eaa; ++ *((int *)&__m128_op1[3]) = 0x0000cecd; ++ *((int *)&__m128_op1[2]) = 0x00004657; ++ *((int *)&__m128_op1[1]) = 0x0000c900; ++ *((int *)&__m128_op1[0]) = 0x00011197; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x454c2996; ++ *((int *)&__m128_op0[2]) = 0x0ffe354e; ++ *((int *)&__m128_op0[1]) = 0x9e063f80; ++ *((int *)&__m128_op0[0]) = 0x2742ba3e; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x42652524; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00070000; ++ *((int *)&__m128_op0[2]) = 0x00050000; ++ *((int *)&__m128_op0[1]) = 0x00030000; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 
0xff81007c; ++ *((int *)&__m128_op1[1]) = 0xffb7005f; ++ *((int *)&__m128_op1[0]) = 0x0070007c; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0000006f; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfbffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7bffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xfbffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x7bffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0002a000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x0002a000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xfc606ec5; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x14155445; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x01030103; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00060fbf; ++ *((int *)&__m128_op0[2]) = 0x02040fbf; ++ *((int *)&__m128_op0[1]) = 0x00020fbf; ++ *((int *)&__m128_op0[0]) = 0x02000fbf; ++ *((int *)&__m128_op1[3]) = 0x63636363; ++ *((int *)&__m128_op1[2]) = 0x63636363; ++ *((int *)&__m128_op1[1]) = 
0xffd27db0; ++ *((int *)&__m128_op1[0]) = 0x10d20fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000008; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000036de0000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000003be14000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128d_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_op1[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x007f007f007f007e; ++ *((unsigned long *)&__m128d_op1[0]) = 0x007f007f007effc6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000015800000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7f8100017f810001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7f8100017f810001; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff00007fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128d_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001e; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000001580000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c +new file mode 100644 +index 000000000..c3da43bb4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c +@@ -0,0 +1,378 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, 
__m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x7ff80000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x0bd80bd8; ++ *((int *)&__m128_op1[2]) = 0x0bdfffff; ++ *((int *)&__m128_op1[1]) = 0x0bd80bd8; ++ *((int *)&__m128_op1[0]) = 0x0bd80000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ff0077; ++ *((int *)&__m128_op0[2]) = 0x00070077; ++ *((int *)&__m128_op0[1]) = 0x00e600ef; ++ *((int *)&__m128_op0[0]) = 0x00ee01de; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00100010; ++ *((int *)&__m128_op0[2]) = 0x00100010; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x000000ff; ++ *((int *)&__m128_op1[0]) = 0xfe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xbf800000; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0xcf000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x003f0000; ++ *((int *)&__m128_op1[2]) = 0x0000003f; ++ *((int *)&__m128_op1[1]) = 0x003f0000; ++ *((int 
*)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x01ff01ff; ++ *((int *)&__m128_op0[2]) = 0x01ff01ff; ++ *((int *)&__m128_op0[1]) = 0x01ff01ff; ++ *((int *)&__m128_op0[0]) = 0x01ff01ff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x20202020; ++ *((int *)&__m128_op0[2]) = 0x20202020; ++ *((int *)&__m128_op0[1]) = 0x20202020; ++ *((int *)&__m128_op0[0]) = 0x20207fff; ++ *((int *)&__m128_op1[3]) = 0x32d3f35e; ++ *((int *)&__m128_op1[2]) = 0xcd509d13; ++ *((int *)&__m128_op1[1]) = 0x3e081b3c; ++ *((int *)&__m128_op1[0]) = 0x93f6b356; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffff0000; ++ *((int *)&__m128_op0[2]) = 0xffff0000; ++ *((int *)&__m128_op0[1]) = 0x40408010; ++ *((int *)&__m128_op0[0]) = 0x80200110; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80000000; ++ *((int *)&__m128_op0[2]) = 0x80000008; ++ *((int *)&__m128_op0[1]) = 0xa2f54a1e; ++ *((int *)&__m128_op0[0]) = 0xa2f54a1e; ++ *((int *)&__m128_op1[3]) = 0x80000000; ++ *((int *)&__m128_op1[2]) = 0x80000008; ++ *((int *)&__m128_op1[1]) = 0xa2f54a1e; ++ *((int *)&__m128_op1[0]) = 0xa2f54a1e; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000000000fc00; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000fc00; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000020302030; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000020302030; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x5d7f5d007f6a007f; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff7fff7fff7f00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffe8081000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x1c083b1f3b1f3b1f; ++ *((unsigned long *)&__m128d_op0[0]) = 0xf244b948a323ab42; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000100fe000070a1; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000115ffffffa1; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffff8f8da00; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000003ea5016c; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffefefd3f7027c5; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000ffce; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x400000003fffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x4000000040000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d 
(__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128d_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c +new file mode 100644 +index 000000000..5228dbede +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c +@@ -0,0 +1,170 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffffeff; ++ *((int *)&__m128_op0[2]) = 0xfffffeff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffcff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00800000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xf4b6f3f5; ++ *((int *)&__m128_op0[0]) = 0x2f4ef4a8; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x08080808; ++ *((int 
*)&__m128_op1[2]) = 0x08080808; ++ *((int *)&__m128_op1[1]) = 0x08080808; ++ *((int *)&__m128_op1[0]) = 0x08080808; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000ffce; ++ *((int *)&__m128_op1[3]) = 0xffff0001; ++ *((int *)&__m128_op1[2]) = 0x1cf0c569; ++ *((int *)&__m128_op1[1]) = 0xc0000002; ++ *((int *)&__m128_op1[0]) = 0xb0995850; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0a752a55; ++ *((int *)&__m128_op0[1]) = 0x0a753500; ++ *((int *)&__m128_op0[0]) = 0x950fa306; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x0a752a55; ++ *((int *)&__m128_op1[1]) = 0x0a753500; ++ *((int *)&__m128_op1[0]) = 0x950fa306; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128d_op0[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c +new file mode 100644 +index 000000000..a2beff53f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c +@@ -0,0 +1,253 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x0000ffff; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0x0000ffff; ++ *((int *)&__m128_op0[0]) = 0x0000fffe; ++ *((int *)&__m128_op1[3]) = 0x0000ffff; ++ *((int *)&__m128_op1[2]) = 0x0000ffff; ++ *((int *)&__m128_op1[1]) = 0x0000ffff; ++ *((int *)&__m128_op1[0]) = 0x0000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 
0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x7f800000; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x0000fffe; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffff0008; ++ *((int *)&__m128_op1[3]) = 0xffc2ffe0; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ffc1; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000008; ++ *((int *)&__m128_op0[1]) = 0x00200020; ++ *((int *)&__m128_op0[0]) = 0x00200020; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffefffe; ++ *((int *)&__m128_op0[2]) = 0xfffefffe; ++ *((int *)&__m128_op0[1]) = 0xfffefffe; ++ *((int *)&__m128_op0[0]) = 0xfffefffe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xf001f001; ++ *((int *)&__m128_op1[0]) = 0x0101f002; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 
0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfeffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xfeffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000fff; ++ *((int *)&__m128_op1[2]) = 0xffffe000; ++ *((int *)&__m128_op1[1]) = 0x00001020; ++ *((int *)&__m128_op1[0]) = 0x20204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128d_op0[0]) = 0xdffdbffeba6f5543; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff3d06ffff4506; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7ffffffe7ffff800; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000003bfb4000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000100010100; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff00011cf0c569; ++ *((unsigned long *)&__m128d_op0[0]) = 0xc0000002b0995850; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128d_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000044470000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c +new file mode 100644 +index 000000000..bfa4914be +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c +@@ -0,0 +1,214 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x80000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00a300a3; ++ *((int *)&__m128_op1[2]) = 0x00a300a3; ++ *((int *)&__m128_op1[1]) = 0x00a300a3; ++ *((int *)&__m128_op1[0]) = 0x00a300a3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xb8ec43be; ++ *((int *)&__m128_op1[2]) = 0xfe38e64b; ++ *((int *)&__m128_op1[1]) = 0x6477d042; ++ *((int *)&__m128_op1[0]) = 0x343cce24; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000010; ++ *((int *)&__m128_op0[2]) = 0x00100010; ++ *((int 
*)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020000; ++ *((int *)&__m128_op0[0]) = 0xffff0001; ++ *((int *)&__m128_op1[3]) = 0x63636363; ++ *((int *)&__m128_op1[2]) = 0x63636363; ++ *((int *)&__m128_op1[1]) = 0x63636363; ++ *((int *)&__m128_op1[0]) = 0x63636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x03080401; ++ *((int *)&__m128_op0[2]) = 0x0d090107; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0a0a0a000a0a0a00; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0a0a0a0009090900; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x387c7e0a133f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128d_op0[0]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_saf_d 
(__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c +new file mode 100644 +index 000000000..bc573936d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c +@@ -0,0 +1,450 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf2f2e5e5; ++ *((int *)&__m128_op0[2]) = 0xe5e5e5e5; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xe5e5e5e5; ++ *((int *)&__m128_op1[2]) = 0xe5e5e5e5; ++ *((int *)&__m128_op1[1]) = 0xe5e5e5e5; ++ *((int *)&__m128_op1[0]) = 0xe4e4e46d; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00800000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x1f400000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x14ccc632; ++ *((int *)&__m128_op0[2]) = 0x0076a4d2; ++ *((int *)&__m128_op0[1]) = 0x685670d2; ++ *((int *)&__m128_op0[0]) = 0x7e00682a; ++ *((int *)&__m128_op1[3]) = 0x14ccc632; ++ *((int *)&__m128_op1[2]) = 0x0076a4d2; ++ *((int *)&__m128_op1[1]) = 0x685670d2; ++ *((int *)&__m128_op1[0]) = 0x7e00682a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ 
*((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xc6c6c6c6; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xc6c6c6c6; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xc6c6c6c6; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xc6c6c6c6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000006; ++ *((int *)&__m128_op1[2]) = 0x00007fff; ++ *((int *)&__m128_op1[1]) = 0x00000008; ++ *((int *)&__m128_op1[0]) = 0xffffa209; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00fc0000; ++ *((int *)&__m128_op1[3]) = 0xfe07e5fe; ++ *((int *)&__m128_op1[2]) = 0xfefdddfe; ++ *((int *)&__m128_op1[1]) = 0x00020100; ++ *((int *)&__m128_op1[0]) = 0xfedd0c00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int 
*)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffff0000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000fffd; ++ *((int *)&__m128_op1[3]) = 0x7fffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00008000; ++ *((int *)&__m128_op1[2]) = 0x3f80ffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x370bdfec; ++ *((int *)&__m128_op0[2]) = 0xffecffec; ++ *((int *)&__m128_op0[1]) = 0x370bdfec; ++ *((int *)&__m128_op0[0]) = 0xa2eb9931; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000040; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000040; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xc2fc0000; ++ *((int *)&__m128_op1[2]) = 0xc3040000; ++ *((int *)&__m128_op1[1]) = 0xc2fc0000; ++ *((int *)&__m128_op1[0]) = 0xc3040000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00fe00fe; ++ *((int *)&__m128_op0[2]) = 0x000200fe; ++ *((int *)&__m128_op0[1]) = 0x00fe00fe; ++ *((int *)&__m128_op0[0]) = 0x000200fe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int 
*)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000004; ++ *((int *)&__m128_op1[0]) = 0x55555555; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000158; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffa8; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf3e6586b; ++ *((int *)&__m128_op0[2]) = 0x60d7b152; ++ *((int *)&__m128_op0[1]) = 0xf7077b93; ++ *((int *)&__m128_op0[0]) = 0x4ac0e000; ++ *((int *)&__m128_op1[3]) = 0x1498507a; ++ *((int *)&__m128_op1[2]) = 0x144d0050; ++ *((int *)&__m128_op1[1]) = 0x7b370981; ++ *((int *)&__m128_op1[0]) = 0xc01200e0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffc2007a; ++ *((int *)&__m128_op0[2]) = 0xff230027; ++ *((int *)&__m128_op0[1]) = 0x0080005e; ++ *((int *)&__m128_op0[0]) = 0xff600001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000100010001fffd; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000004fc04f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128d_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128d_op0[0]) = 0xdffdbffeba6f5543; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128d_op1[0]) = 0xdffdbffeba6f5543; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffff700000009; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffffff700000009; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x4fa432d67fc00000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffcffff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000fffd000a0000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xf0fd800080000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000a00028004000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00820082ff81ff81; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff81ff81ff81ff81; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0007000100040102; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0003000100010101; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0007000100040102; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0003000100010101; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c +new file mode 100644 +index 000000000..87cb8da7c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c +@@ -0,0 +1,407 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffdfff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffdfff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffe000; ++ *((int *)&__m128_op1[0]) = 0x01ffe200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010002; ++ *((int *)&__m128_op0[2]) = 0x0000fe7d; ++ *((int *)&__m128_op0[1]) = 0x00010002; ++ *((int *)&__m128_op0[0]) = 0x0000fe02; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x0000007b; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x1223dabf; ++ *((int *)&__m128_op0[2]) = 0x4c3b3549; ++ *((int *)&__m128_op0[1]) = 0x8e8f8626; ++ *((int *)&__m128_op0[0]) = 0xf15be124; ++ *((int *)&__m128_op1[3]) = 0xfffffacd; ++ *((int *)&__m128_op1[2]) = 0xb6dbecac;
++ *((int *)&__m128_op1[1]) = 0x1f5533a6; ++ *((int *)&__m128_op1[0]) = 0x94f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfbffffff; ++ *((int *)&__m128_op0[0]) = 0x27001517; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ffff; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffff53d9; ++ *((int *)&__m128_op0[1]) = 0xffff0001; ++ *((int *)&__m128_op0[0]) = 0xffff9515; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00010001; ++ *((int *)&__m128_op1[2]) = 0x00010001; ++ *((int *)&__m128_op1[1]) = 0x00010001; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000080; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00df020f; ++ *((int *)&__m128_op0[0]) = 0x0078007f; ++ *((int *)&__m128_op1[3]) = 0x0037ffd4; ++ *((int *)&__m128_op1[2]) = 0x0083ffe5; ++ *((int *)&__m128_op1[1]) = 0x001e0052; ++ 
*((int *)&__m128_op1[0]) = 0x001ffff9; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00ff00ff; ++ *((int *)&__m128_op0[0]) = 0x00ff00ff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x05452505; ++ *((int *)&__m128_op0[1]) = 0x00000004; ++ *((int *)&__m128_op0[0]) = 0x442403e4; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000101; ++ *((int *)&__m128_op0[2]) = 0x00000101; ++ *((int *)&__m128_op0[1]) = 0x00000101; ++ *((int *)&__m128_op0[0]) = 0x00000101; ++ *((int *)&__m128_op1[3]) = 0x00000002; ++ *((int *)&__m128_op1[2]) = 0x00000002; ++ *((int *)&__m128_op1[1]) = 0x00000002; ++ *((int *)&__m128_op1[0]) = 0x00000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010000; ++ *((int *)&__m128_op0[2]) = 0x00010000; ++ *((int *)&__m128_op0[1]) = 0x0000cd63; ++ *((int *)&__m128_op0[0]) = 0x0000cd63; ++ *((int *)&__m128_op1[3]) = 0xffffcd63; ++ *((int *)&__m128_op1[2]) = 0xffffcd63; ++ *((int *)&__m128_op1[1]) = 0xffffd765; ++ *((int *)&__m128_op1[0]) = 0xffffd765; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffff00ff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffff0000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffff0c8000c212; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfefffeff7f002d06; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00f0008100800080; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00f000807000009e; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffe00029f9f6061; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000c0010000a186; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00067fff0002a207; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff80000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000013ec13e; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc03fc03fc0ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffff00018d8b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x67eb85af0000b000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c +new file mode 100644 +index 000000000..3845e8ec3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c +@@ -0,0 +1,512 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int 
long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x0000ffff; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x0000ffff; ++ *((int *)&__m128_op1[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x7f800000; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((int *)&__m128_op1[3]) = 0x7f800000; ++ *((int *)&__m128_op1[2]) = 0x7f800000; ++ *((int *)&__m128_op1[1]) = 0x7f800000; ++ *((int *)&__m128_op1[0]) = 0x7f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0x00007f01; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffff02; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x05452505; ++ *((int *)&__m128_op1[1]) = 0x00000004; ++ *((int *)&__m128_op1[0]) = 0x442403e4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x5f675e96; ++ *((int *)&__m128_op0[2]) = 0xe29a5a60; ++ *((int *)&__m128_op0[1]) = 0x7fff7fff; ++ *((int *)&__m128_op0[0]) = 0x7fff7fff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x5e695e95; ++ *((int *)&__m128_op1[0]) = 0xe1cb5a01; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00800080; ++ *((int *)&__m128_op0[2]) = 0x00800080; ++ *((int *)&__m128_op0[1]) = 0x0080006b; ++ *((int *)&__m128_op0[0]) = 0x0000000b; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x80808080; ++ *((int *)&__m128_op1[0]) = 0x806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x7f800000; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff800000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xff800000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfffefffe; ++ *((int *)&__m128_op0[0]) = 0xfffffffc; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xfffefffe; ++ *((int *)&__m128_op1[0]) = 0xfffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, 
__m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffc2ffe7; ++ *((int *)&__m128_op0[2]) = 0x00000007; ++ *((int *)&__m128_op0[1]) = 0x0000ffc1; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xfffff1a0; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x153e3e49; ++ *((int *)&__m128_op0[2]) = 0x307d0771; ++ *((int *)&__m128_op0[1]) = 0x0d8e3670; ++ *((int *)&__m128_op0[0]) = 0x6ac02b9b; ++ *((int *)&__m128_op1[3]) = 0x55aa55c3; ++ *((int *)&__m128_op1[2]) = 0xd5aa55c4; ++ *((int *)&__m128_op1[1]) = 0xaa55556f; ++ *((int *)&__m128_op1[0]) = 0xd5aaaac1; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0x0015172b; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xfffffffe; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xfffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffff0000; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x86dd8341; ++ *((int *)&__m128_op1[2]) = 0xb164f12b; ++ *((int *)&__m128_op1[1]) = 0x9611c398; ++ *((int *)&__m128_op1[0]) = 0x5b3159f5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000300000003; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff07013; ++ *((unsigned long *)&__m128d_op0[0]) = 0x65017c2ac9ca9fd0; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000001021; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0080000000800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x345002920f3017d6; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffff8607db959f; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ef8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffee00000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c +new file mode 100644 +index 000000000..964eff79f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ 
long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00003fee; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000004; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000002; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x03574e3a; ++ *((int *)&__m128_op1[2]) = 0x03574e3a; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00010400; ++ *((int *)&__m128_op1[3]) = 0x10f917d7; ++ *((int *)&__m128_op1[2]) = 0x2d3d01e4; ++ *((int *)&__m128_op1[1]) = 0x203e16d1; ++ *((int *)&__m128_op1[0]) = 0x16de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000101f; ++ *((int *)&__m128_op0[2]) = 0xffff8b68; ++ *((int *)&__m128_op0[1]) = 0x00000b6f; ++ *((int *)&__m128_op0[0]) = 0xffff8095; ++ *((int *)&__m128_op1[3]) = 0x10f917d7; ++ *((int *)&__m128_op1[2]) = 0x2d3d01e4; ++ *((int *)&__m128_op1[1]) = 0x203e16d1; ++ *((int *)&__m128_op1[0]) = 0x16de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x11000f20; ++ *((int *)&__m128_op0[2]) = 0x10000e20; ++ *((int *)&__m128_op0[1]) = 0x0f000d20; ++ *((int *)&__m128_op0[0]) = 0x0e000c20; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00050005; ++ *((int *)&__m128_op0[2]) = 0x00050005; ++ *((int *)&__m128_op0[1]) = 0x00050005; ++ *((int *)&__m128_op0[0]) = 0x00050005; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; 
++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x15d926c7; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000e41b; ++ *((int *)&__m128_op1[3]) = 0xfffffacd; ++ *((int *)&__m128_op1[2]) = 0xb6dbecac; ++ *((int *)&__m128_op1[1]) = 0x1f5533a6; ++ *((int *)&__m128_op1[0]) = 0x94f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x04040504; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x04040504; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0001000c; ++ *((int *)&__m128_op0[2]) = 0xfffffff2; ++ *((int *)&__m128_op0[1]) = 0x0001000d; ++ *((int *)&__m128_op0[0]) = 0xfffffff1; ++ *((int *)&__m128_op1[3]) = 0xffff8a17; ++ *((int *)&__m128_op1[2]) = 0xffffc758; ++ *((int *)&__m128_op1[1]) = 0xffff69bb; ++ *((int *)&__m128_op1[0]) = 0xffffad3b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff800000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xff800000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffff1739; ++ *((int *)&__m128_op1[2]) = 0xffff48aa; ++ *((int *)&__m128_op1[1]) = 0xffff2896; ++ *((int *)&__m128_op1[0]) = 0xffff5b88; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000003; ++ *((int *)&__m128_op0[0]) = 0x0000003f; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000003; ++ *((int *)&__m128_op1[0]) = 0x0000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x084d12ce; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x24170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = 
__lsx_vfcmp_sune_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7474f6fd7474fefe; ++ *((unsigned long *)&__m128d_op0[0]) = 0xf474f6fef474f6fe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128d_op1[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0101fe870101fe87; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0101fe8700000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long *)&__m128d_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000007fffa9ed; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7f8000017fffca8b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffff7603; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128d_op1[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000005e695e95; ++ *((unsigned long *)&__m128d_op1[0]) = 0x5e695e96c396b402; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c +new file mode 100644 +index 000000000..ea47baf40 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c +@@ -0,0 +1,269 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000007f; ++ *((int *)&__m128_op0[2]) = 0x0000007f; ++ *((int *)&__m128_op0[1]) = 0x0000007f; ++ *((int *)&__m128_op0[0]) = 0x0000007f; ++ *((int *)&__m128_op1[3]) = 0x3ff00000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xfffc0020; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x2757de72; ++ *((int *)&__m128_op0[2]) = 0x33d771a3; ++ *((int *)&__m128_op0[1]) = 0x166891d5; ++ *((int *)&__m128_op0[0]) = 0x1e8b7eff; ++ *((int *)&__m128_op1[3]) = 0x2757de72; ++ *((int *)&__m128_op1[2]) = 0x33d771a3; ++ *((int *)&__m128_op1[1]) = 0x166891d5; ++ *((int *)&__m128_op1[0]) = 0x1e8b7eff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00fe00ff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffffe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xffffff02; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000000d; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xfffffe03; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xfffffe03; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xbafebb00; ++ *((int *)&__m128_op1[2]) = 0xffd500fe; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xbffffffe; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80000000; ++ *((int *)&__m128_op0[2]) = 0x80000000; ++ *((int *)&__m128_op0[1]) = 0x80000000; ++ *((int *)&__m128_op0[0]) = 0x80000000; ++ *((int *)&__m128_op1[3]) = 0x000000ff; ++ *((int *)&__m128_op1[2]) = 0x0000857a; ++ *((int *)&__m128_op1[1]) = 0x05fafe01; ++ *((int *)&__m128_op1[0]) = 0x01fe000e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fff7fff; ++ *((int *)&__m128_op0[2]) = 0x7fff7fff; ++ *((int *)&__m128_op0[1]) = 0xbf6b8101; ++ *((int *)&__m128_op0[0]) = 0x81018101; ++ *((int *)&__m128_op1[3]) = 0xe3636363; ++ *((int *)&__m128_op1[2]) = 0x63abdf16; ++ *((int *)&__m128_op1[1]) = 0x41f8e080; ++ *((int *)&__m128_op1[0]) = 0x16161198; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000005d5d; ++ *((unsigned long *)&__m128d_op1[1]) = 0x08fdc221bfdb1927; ++ *((unsigned long *)&__m128d_op1[0]) = 0x4303c67e9b7fb213; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7ffffffb; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000040002; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffff000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000d00000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c +new file mode 100644 +index 000000000..68cb5a52f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, 
__m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xe17fe003; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x0000ffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 
0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00190819; ++ *((int *)&__m128_op1[2]) = 0x00190019; ++ *((int *)&__m128_op1[1]) = 0x00190819; ++ *((int *)&__m128_op1[0]) = 0x00190019; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfe800000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0x7fffffff; ++ *((int *)&__m128_op1[2]) = 0x82bb9784; ++ *((int *)&__m128_op1[1]) = 0x7fffffff; ++ *((int *)&__m128_op1[0]) = 0xc6bb97ac; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x7f3f0180; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xa2321469; ++ *((int *)&__m128_op0[0]) = 0x7fd03f7f; ++ *((int *)&__m128_op1[3]) = 0x00000406; ++ *((int *)&__m128_op1[2]) = 0x00000406; ++ *((int *)&__m128_op1[1]) = 0x02020202; ++ *((int *)&__m128_op1[0]) = 0x0202fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 
0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xfffffff5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000014; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000014; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xfffc0004; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x67eb85b0; ++ *((int *)&__m128_op0[2]) = 0xb2ebb001; ++ *((int *)&__m128_op0[1]) = 0xc8847ef6; ++ *((int *)&__m128_op0[0]) = 0xed3f2000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0002de46; ++ *((int *)&__m128_op0[2]) = 0x682de060; ++ *((int *)&__m128_op0[1]) = 0x09b50da6; ++ *((int *)&__m128_op0[0]) = 0xe67b8fc0; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x084d12ce; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x24170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00ffffff; ++ *((int *)&__m128_op0[0]) = 0x00ffffff; ++ *((int *)&__m128_op1[3]) = 0x0000feff; ++ *((int *)&__m128_op1[2]) = 0x23560000; ++ *((int *)&__m128_op1[1]) = 0x0000fd16; ++ *((int *)&__m128_op1[0]) = 0x54860000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xfffffffe; ++ *((int *)&__m128_op0[0]) = 0xfffff784; ++ *((int *)&__m128_op1[3]) = 0x0177fff0; ++ *((int *)&__m128_op1[2]) = 
0xfffffff0; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x011ff8bc; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0002000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00ff00ff00ff00fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffebd06fffe820c; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fff7ffe7fff3506; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ffffffeffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128d_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128d_op1[0]) = 0x003dbe88077c78c1; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001fffe0001fefc; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0003000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c +new file mode 100644 +index 000000000..d4a86e262 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00e0000000e00000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002a55005501; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002a55000001; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x36280000; ++ *((int *)&__m128_result[1]) = 0x42a00000; ++ *((int *)&__m128_result[0]) = 0x42a02000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xf436f3f5; ++ *((int *)&__m128_op0[0]) = 0x2f4ef4a8; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcfb799f1; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0282800002828282; ++ *((int *)&__m128_result[3]) = 0xffffe000; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0xc1f6e000; ++ *((int *)&__m128_result[0]) = 0xbb3e2000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000040004000100; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x36de0000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x3be14000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x403be000; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63637687; ++ *((int *)&__m128_op0[2]) = 0x636316bb; ++ *((int *)&__m128_op0[1]) = 0x63636363; ++ *((int *)&__m128_op0[0]) = 0x63636363; ++ *((unsigned long *)&__m128d_result[1]) = 0x446c6ed0e0000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x446c62d760000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000000ff; ++ *((int *)&__m128_op0[2]) = 0x000000ff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x371fe00000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff7fff7ef; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80808080ffffffff; ++ *((int *)&__m128_result[3]) = 0xffffe000; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0xc6ffe000; ++ *((int *)&__m128_result[0]) = 0xc6fde000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffe0000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffe1ffc100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000400000; ++ *((int *)&__m128_result[3]) = 0xfffc2000; ++ *((int *)&__m128_result[2]) = 0xfff82000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000b3a6; ++ *((int *)&__m128_op0[2]) = 0x000067da; ++ *((int *)&__m128_op0[1]) = 0x00004e42; ++ *((int *)&__m128_op0[0]) = 0x0000c26a; ++ *((unsigned long *)&__m128d_result[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x3789f68000000000; ++ 
__m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xffff0000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffe0000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001001001000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4195d926d8018000; ++ *((int *)&__m128_result[3]) = 0x33800000; ++ *((int *)&__m128_result[2]) = 0x35800000; ++ *((int *)&__m128_result[1]) = 0x37800000; ++ *((int *)&__m128_result[0]) = 0x37000000; ++ __m128_out = __lsx_vfcvth_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvth_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((int *)&__m128_result[3]) = 0xffffe000; ++ *((int *)&__m128_result[2]) = 0xffffe000; ++ *((int *)&__m128_result[1]) = 0xffffe000; ++ *((int *)&__m128_result[0]) = 0xffffe000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((int *)&__m128_result[3]) = 0x35200000; ++ *((int *)&__m128_result[2]) = 0x35200000; ++ *((int *)&__m128_result[1]) = 0x35200000; ++ *((int *)&__m128_result[0]) = 0x35200000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000100; ++ *((int *)&__m128_op0[2]) = 0x0f00fe00; ++ *((int *)&__m128_op0[1]) = 0x0000017f; ++ *((int *)&__m128_op0[0]) = 0xff00fe7f; ++ *((unsigned long *)&__m128d_result[1]) 
= 0x3727f00000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xc7e01fcfe0000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000020; ++ *((int *)&__m128_op0[0]) = 0x00000020; ++ *((unsigned long *)&__m128d_result[1]) = 0x36f0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x36f0000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xbd994889; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0a092444; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x3941248880000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf0bc9a5278285a4a; ++ *((int *)&__m128_result[3]) = 0xc6178000; ++ *((int *)&__m128_result[2]) = 0xbb4a4000; ++ *((int *)&__m128_result[1]) = 0x47050000; ++ *((int *)&__m128_result[0]) = 0x43494000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00040004; ++ *((int *)&__m128_op0[2]) = 0x00040004; ++ *((int *)&__m128_op0[1]) = 0x00040004; ++ *((int *)&__m128_op0[0]) = 0x00040004; ++ *((unsigned long *)&__m128d_result[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x37c0001000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((int *)&__m128_result[3]) = 0xffe00000; ++ *((int *)&__m128_result[2]) = 0xffe00000; ++ *((int *)&__m128_result[1]) = 0xffe00000; ++ *((int *)&__m128_result[0]) = 0xffe00000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; 
++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffe000; ++ *((int *)&__m128_result[0]) = 0xffffe000; ++ __m128_out = __lsx_vfcvtl_s_h (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x007f7f7f; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x380fdfdfc0000000; ++ __m128d_out = __lsx_vfcvtl_d_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c +new file mode 100644 +index 000000000..e8f4f12b9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c +@@ -0,0 +1,278 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x004200a0; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x004200a0; ++ *((int *)&__m128_op0[0]) = 0x00200001; ++ *((int *)&__m128_op1[3]) = 0x004200a0; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x004200a0; ++ *((int *)&__m128_op1[0]) = 0x00200000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00010001; ++ *((int *)&__m128_op1[2]) = 0x0001007c; ++ *((int *)&__m128_op1[1]) = 0x00010001; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x80808080; ++ *((int *)&__m128_op1[2]) = 0x80808080; ++ *((int *)&__m128_op1[1]) = 0x80808080; ++ *((int *)&__m128_op1[0]) = 0x80808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xfffffffc; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffffc; ++ *((int *)&__m128_op1[3]) = 0x00000001; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000103; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000049000000c0; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffff29; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000100000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ff0000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000002c002400; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128d_op1[0]) = 0x28bf0351ec69b5f2; ++ *((int 
*)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000dc300003ffb; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000dc300003ffb; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7fffffff7fffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x7ffffffb; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xbba0c07b51230d5c; ++ *((unsigned long *)&__m128d_op0[0]) = 0xa15f3f9e8763c2b9; ++ *((unsigned long *)&__m128d_op1[1]) = 0xbba0c07b51230d5c; ++ *((unsigned long *)&__m128d_op1[0]) = 0xa15f3f9e8763c2b9; ++ *((int *)&__m128_result[3]) = 0x9d0603db; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x9d0603db; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8101010181010101; ++ *((unsigned long *)&__m128d_op1[0]) = 0x8101010181010101; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffc00000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffdfffe80008000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xffeffff4; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0x0000000000000090; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000090; ++ *((unsigned long *)&__m128d_op1[1]) = 0x004eff6200d2ff76; ++ *((unsigned long *)&__m128d_op1[0]) = 0xff70002800be00a0; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c +new file mode 100644 +index 000000000..85db95762 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c +@@ -0,0 +1,161 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m128d_result[0]) = 0xbff0000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x40cd120000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x4050000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0086000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0082000000000007; ++ *((unsigned long *)&__m128d_result[1]) = 0x4160c00000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x4110000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff8000010f800000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000051649b6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000003e0000003f; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x41945926d8000000; ++ __m128d_out = __lsx_vffinth_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe82fe0200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe82fe0200000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xc177d01fe0000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128d_result[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x40f0001000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long 
*)&__m128d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x40f3fa0000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xc0fffff000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffintl_d_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c +new file mode 100644 +index 000000000..f8839cfcd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c +@@ -0,0 +1,264 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x438ff81ff81ff820; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128d_result[1]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128d_result[0]) = 0x43d3e0000013e000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xbff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xbff0000000000000; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0674c8868a74fc80; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906; ++ *((unsigned long *)&__m128d_result[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128d_result[0]) = 0xc3818bffe7b7a7b8; ++ __m128d_out = __lsx_vffint_d_l (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((int *)&__m128_result[3]) = 0x4b7f00ff; ++ *((int *)&__m128_result[2]) = 0x4b7f00ff; ++ *((int *)&__m128_result[1]) = 0x4b7f00ff; ++ *((int *)&__m128_result[0]) = 0x4b7f00ff; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004; ++ *((int *)&__m128_result[3]) = 0x40800000; ++ *((int *)&__m128_result[2]) = 0x4b800000; ++ *((int *)&__m128_result[1]) = 0x47800080; ++ *((int *)&__m128_result[0]) = 0x40800000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x47000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x4eede849; ++ *((int *)&__m128_result[0]) = 0x4f000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa352bfac9269e0aa; ++ *((int *)&__m128_result[3]) = 0xce23d33d; ++ *((int *)&__m128_result[2]) = 0x4edd53ea; ++ *((int *)&__m128_result[1]) = 0xceb95a81; ++ *((int *)&__m128_result[0]) = 0xcedb2c3f; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int 
*)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x467fe000; ++ __m128_out = __lsx_vffint_s_w (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0xbf800000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xcf000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x5eff0000; ++ *((int *)&__m128_result[2]) = 0x5eff0000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000e3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((int *)&__m128_result[3]) = 0x43630000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xdc159371; ++ *((int *)&__m128_result[0]) = 0x4f7fff00; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x42800000; ++ *((int *)&__m128_result[0]) = 0x42800000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x43800000; ++ *((int *)&__m128_result[0]) = 0x43800000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x59f7fd70; ++ *((int *)&__m128_result[0]) = 0x59f7fd70; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b; ++ *((int *)&__m128_result[3]) = 0x577fff00; ++ *((int *)&__m128_result[2]) = 
0x577fff00; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x596f0000; ++ __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c +new file mode 100644 +index 000000000..9150e27ca +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c +@@ -0,0 +1,102 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8493941335f5cc0c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x625a7312befcb21e; ++ *((unsigned long *)&__m128d_result[1]) = 0x43e092728266beba; ++ *((unsigned long *)&__m128d_result[0]) = 0x43d8969cc4afbf2d; ++ __m128d_out = __lsx_vffint_d_lu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_lu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_lu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vffint_d_lu (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001600000016; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001600000016; ++ *((int *)&__m128_result[3]) = 0x41b00000; ++ *((int *)&__m128_result[2]) = 0x41b00000; ++ *((int *)&__m128_result[1]) = 0x41b00000; ++ *((int *)&__m128_result[0]) = 0x41b00000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((int *)&__m128_result[3]) = 0x4f800000; ++ *((int *)&__m128_result[2]) = 0x4f800000; ++ *((int *)&__m128_result[1]) = 0x4f800000; ++ *((int *)&__m128_result[0]) = 0x4f800000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000442800007b50; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0204; ++ *((int *)&__m128_result[3]) = 0x46885000; ++ *((int *)&__m128_result[2]) = 0x46f6a000; ++ *((int *)&__m128_result[1]) = 0x4f800000; ++ *((int *)&__m128_result[0]) = 0x4f7fff02; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vffint_s_wu (__m128i_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c +new file mode 100644 +index 000000000..cc36bf136 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c +@@ -0,0 +1,76 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xc090c40000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d 
(__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000001000000048; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128d_result[0]) = 0xc090380000000000; ++ __m128d_out = __lsx_vflogb_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c +new file mode 100644 +index 000000000..624589620 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c +@@ -0,0 +1,185 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00003004; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xc3080000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 
0xffffffff; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x01010101; ++ *((int *)&__m128_op0[2]) = 0x01010101; ++ *((int *)&__m128_op0[1]) = 0x01010101; ++ *((int *)&__m128_op0[0]) = 0x01010101; ++ *((int *)&__m128_result[3]) = 0xc2fa0000; ++ *((int *)&__m128_result[2]) = 0xc2fa0000; ++ *((int *)&__m128_result[1]) = 0xc2fa0000; ++ *((int *)&__m128_result[0]) = 0xc2fa0000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x01ff01ff; ++ *((int *)&__m128_op0[2]) = 0x01ff01ff; ++ *((int *)&__m128_op0[1]) = 0x01ff01ff; ++ *((int *)&__m128_op0[0]) = 0x01ff01ff; ++ *((int *)&__m128_result[3]) = 0xc2f80000; ++ *((int *)&__m128_result[2]) = 0xc2f80000; ++ *((int *)&__m128_result[1]) = 0xc2f80000; ++ *((int *)&__m128_result[0]) = 0xc2f80000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xd46cdc13; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00fe00fe; ++ *((int *)&__m128_op0[2]) = 0x000200fe; ++ *((int *)&__m128_op0[1]) = 0x00fe00fe; ++ *((int *)&__m128_op0[0]) = 0x000200fe; ++ *((int *)&__m128_result[3]) = 0xc2fc0000; ++ *((int *)&__m128_result[2]) = 0xc3040000; ++ *((int *)&__m128_result[1]) = 0xc2fc0000; ++ *((int *)&__m128_result[0]) = 0xc3040000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01010101; ++ *((int *)&__m128_op0[0]) = 0x00000100; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xc2fa0000; ++ *((int *)&__m128_result[0]) = 0xc30d0000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000014; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000014; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xc3110000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xc3110000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x4e3e1337; ++ *((int *)&__m128_op0[0]) = 0x38bb47d2; ++ *((int *)&__m128_result[3]) = 
0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0x41e80000; ++ *((int *)&__m128_result[0]) = 0xc1600000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00003ff8; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0xc3080000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf1f181a2; ++ *((int *)&__m128_op0[2]) = 0xf1f1f1b0; ++ *((int *)&__m128_op0[1]) = 0xf1f1f1f1; ++ *((int *)&__m128_op0[0]) = 0xf180f1f1; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vflogb_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c +new file mode 100644 +index 000000000..c5de1ac7a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c +@@ -0,0 +1,251 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8a228acac14e440a; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc77c47cdc0f16549; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffd24271c4; ++ *((unsigned long *)&__m128d_op2[0]) = 0x2711bad1e8e309ed; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffd24271c4; ++ *((unsigned long *)&__m128d_result[0]) = 0x2711bad1e8e309ed; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffe000ffff1fff; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000003f80b0; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0080200000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000080000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000001e; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m128d_op2[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xfff8000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff8000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000009000900; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x9c83e21a22001818; ++ *((unsigned long *)&__m128d_op0[0]) = 0xdd3b8b02563b2d7b; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7f7f7f007f7f7f00; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x7f7f7f007f7f7f00; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff01e41ffff0ffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x5555000054100000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x5555000154100155; ++ 
*((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000010; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128d_op2[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128d_op2[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0038d800ff000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00fffe00fffffe00; ++ *((unsigned long *)&__m128d_op2[1]) = 0x8000008000008080; ++ *((unsigned long *)&__m128d_op2[0]) = 0x8080800000800080; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000008000008080; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00ff80ff00ff80ff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000103; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000100000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000103; ++ __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c +new file mode 100644 +index 000000000..6b85e87bd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int 
*)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000002; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000002; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0028e0a1; ++ *((int *)&__m128_op0[2]) = 0xa000a041; ++ *((int *)&__m128_op0[1]) = 0x01000041; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x01000001; ++ *((int *)&__m128_op1[1]) = 0x00010001; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x01000001; ++ *((int *)&__m128_op2[1]) = 0xffffe000; ++ *((int *)&__m128_op2[0]) = 0xffff1fff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x01000001; ++ *((int *)&__m128_result[1]) = 0xffffe000; ++ *((int *)&__m128_result[0]) = 0xffff1fff; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f800000; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x7f800000; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((int *)&__m128_op1[3]) = 0x00000002; ++ *((int *)&__m128_op1[2]) = 0x00000002; ++ *((int *)&__m128_op1[1]) = 0x00000003; ++ *((int *)&__m128_op1[0]) = 0x00000003; ++ *((int *)&__m128_op2[3]) = 0x3fc00000; ++ *((int *)&__m128_op2[2]) = 0x3fc00000; ++ *((int *)&__m128_op2[1]) = 0x3fc00000; ++ *((int *)&__m128_op2[0]) = 0x3fc00000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xc1bdceee; ++ *((int *)&__m128_op0[2]) = 0x242070db; ++ *((int *)&__m128_op0[1]) = 0xe8c7b756; ++ *((int *)&__m128_op0[0]) = 0xd76aa478; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7f400000; ++ *((int *)&__m128_op0[2]) = 0x7f040000; ++ *((int *)&__m128_op0[1]) = 0x7f020000; ++ *((int *)&__m128_op0[0]) = 0x7f020000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x0014002c; ++ *((int *)&__m128_op1[1]) = 0xfffefffe; ++ *((int *)&__m128_op1[0]) = 0x003b0013; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 
0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0x3ea5016b; ++ *((int *)&__m128_result[1]) = 0xfffefffe; ++ *((int *)&__m128_result[0]) = 0x3f6fb04d; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x004f0080; ++ *((int *)&__m128_op0[2]) = 0x004f0080; ++ *((int *)&__m128_op0[1]) = 0x004f0080; ++ *((int *)&__m128_op0[0]) = 0x004f0080; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x7fff7fff; ++ *((int *)&__m128_op2[2]) = 0x7fff7fff; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7fff7fff; ++ *((int *)&__m128_result[2]) = 0x7fff7fff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x3d3d3d3d; ++ *((int *)&__m128_op0[2]) = 0x3d3d3d3d; ++ *((int *)&__m128_op0[1]) = 0x3d3d3d3d; ++ *((int *)&__m128_op0[0]) = 0x3d3d3d3d; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00100000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x0000bd3d; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00050005; ++ *((int *)&__m128_op1[2]) = 0x00050005; ++ *((int *)&__m128_op1[1]) = 0x00050005; ++ *((int *)&__m128_op1[0]) = 0x00050005; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xe500c085; ++ *((int *)&__m128_op0[2]) = 0xc000c005; ++ *((int *)&__m128_op0[1]) = 0xe5c1a185; ++ *((int *)&__m128_op0[0]) = 0xc48004c5; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffc000; ++ *((int *)&__m128_op1[0]) = 0xffffc005; ++ *((int *)&__m128_op2[3]) = 0xff550025; ++ *((int *)&__m128_op2[2]) = 0x002a004b; ++ *((int *)&__m128_op2[1]) = 0x00590013; ++ *((int *)&__m128_op2[0]) = 0x005cffca; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffc000; ++ *((int *)&__m128_result[0]) = 0xffffc005; ++ __m128_out = __lsx_vfmadd_s 
(__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00fe0001; ++ *((int *)&__m128_op1[2]) = 0x00cf005f; ++ *((int *)&__m128_op1[1]) = 0x7fff7fff; ++ *((int *)&__m128_op1[0]) = 0x7fff7f00; ++ *((int *)&__m128_op2[3]) = 0x5d7f5d00; ++ *((int *)&__m128_op2[2]) = 0x7f6a007f; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x5d7f5d00; ++ *((int *)&__m128_result[2]) = 0x7f6a007f; ++ *((int *)&__m128_result[1]) = 0x7fff7fff; ++ *((int *)&__m128_result[0]) = 0x7fff7f00; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00680486; ++ *((int *)&__m128_op0[2]) = 0xffffffda; ++ *((int *)&__m128_op0[1]) = 0xffff913b; ++ *((int *)&__m128_op0[0]) = 0xb9951901; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x01030103; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00200060; ++ *((int *)&__m128_op2[0]) = 0x00200060; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xffffffda; ++ *((int *)&__m128_result[1]) = 0xffff913b; ++ *((int *)&__m128_result[0]) = 0x001fed4d; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x001a001a; ++ *((int *)&__m128_op0[2]) = 0x001a0008; ++ *((int *)&__m128_op0[1]) = 0x001a001a; ++ *((int *)&__m128_op0[0]) = 0x001a000b; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xff800001; ++ *((int *)&__m128_op1[0]) = 0x0f800000; ++ *((int *)&__m128_op2[3]) = 0xff800000; ++ *((int *)&__m128_op2[2]) = 0xff800000; ++ *((int *)&__m128_op2[1]) = 0xff800000; ++ *((int *)&__m128_op2[0]) = 0xff800000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffc00001; ++ *((int *)&__m128_result[0]) = 0xff800000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfe3bfb01; ++ *((int *)&__m128_op0[2]) = 0xfe3bfe01; ++ *((int *)&__m128_op0[1]) = 0xfe03fe3f; ++ *((int *)&__m128_op0[0]) = 0xfe01fa21; ++ *((int *)&__m128_op1[3]) = 0xfe3bfb01; ++ *((int *)&__m128_op1[2]) = 0xfe3bfe01; ++ *((int *)&__m128_op1[1]) = 0xfe03fe3f; ++ *((int *)&__m128_op1[0]) = 0xfe01fa21; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int 
*)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffe001; ++ *((int *)&__m128_op0[2]) = 0xffffe001; ++ *((int *)&__m128_op0[1]) = 0xffffe001; ++ *((int *)&__m128_op0[0]) = 0xffffe001; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffe000; ++ *((int *)&__m128_op1[0]) = 0x01ffe200; ++ *((int *)&__m128_op2[3]) = 0x04040383; ++ *((int *)&__m128_op2[2]) = 0x83838404; ++ *((int *)&__m128_op2[1]) = 0x04040383; ++ *((int *)&__m128_op2[0]) = 0x83838404; ++ *((int *)&__m128_result[3]) = 0xffffe001; ++ *((int *)&__m128_result[2]) = 0xffffe001; ++ *((int *)&__m128_result[1]) = 0xffffe001; ++ *((int *)&__m128_result[0]) = 0xffffe001; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x18171615; ++ *((int *)&__m128_op0[2]) = 0x17161514; ++ *((int *)&__m128_op0[1]) = 0x16151413; ++ *((int *)&__m128_op0[0]) = 0x151d3756; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x39412488; ++ *((int *)&__m128_op1[0]) = 0x80000000; ++ *((int *)&__m128_op2[3]) = 0x3ff00000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x40f3fa00; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xbff00000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xc0f3fa00; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000005; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x3ddc5dac; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) 
= 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63636b6a; ++ *((int *)&__m128_op0[2]) = 0xfe486741; ++ *((int *)&__m128_op0[1]) = 0x41f8e880; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xe3636363; ++ *((int *)&__m128_op1[2]) = 0x63abdf16; ++ *((int *)&__m128_op1[1]) = 0x41f8e080; ++ *((int *)&__m128_op1[0]) = 0x16161198; ++ *((int *)&__m128_op2[3]) = 0x00c27580; ++ *((int *)&__m128_op2[2]) = 0x00bccf42; ++ *((int *)&__m128_op2[1]) = 0x00a975be; ++ *((int *)&__m128_op2[0]) = 0x00accf03; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0xff800000; ++ *((int *)&__m128_result[1]) = 0x4471fb84; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c +new file mode 100644 +index 000000000..442473fb4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c +@@ -0,0 +1,200 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m128d_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128d_result[0]) = 0x0400040004000400; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128d_result[0]) = 0x01ff01ff01ff01ff; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ 
*((unsigned long *)&__m128d_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffcfffcfffcfffc; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x000000000000ffff; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfdfef9ff0efff900; ++ *((unsigned long *)&__m128d_result[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_result[0]) = 0x6363636363636363; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128d_op0[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128d_result[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128d_result[0]) = 0x377b810912c0e000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128d_op0[0]) = 0xc3818bffe7b7a7b8; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x17c64aaef639f093; ++ *((unsigned long *)&__m128d_op0[0]) = 0xdb8f439722ec502d; ++ 
*((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x17c64aaef639f093; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128d_result[0]) = 0x00000000ff800000; ++ __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000c000ffffc000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000958affff995d; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c +new file mode 100644 +index 000000000..876588827 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x0000ffff; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x0000ffff; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xc2409eda; ++ *((int *)&__m128_op1[2]) = 0xb019323f; ++ *((int *)&__m128_op1[1]) = 0x460f3b39; ++ *((int *)&__m128_op1[0]) = 0x3ef4be3a; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x460f3b39; ++ *((int *)&__m128_result[0]) = 0x3ef4be3a; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 
0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000001; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000001; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xfefd7f7f; ++ *((int *)&__m128_op1[2]) = 0x7f7f7f7e; ++ *((int *)&__m128_op1[1]) = 0xdffdbffe; ++ *((int *)&__m128_op1[0]) = 0xba6f5543; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x7f7f7f7e; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff84fff4; ++ *((int *)&__m128_op0[2]) = 0xff84fff4; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff0; ++ *((int *)&__m128_op1[3]) = 0xff84fff4; ++ *((int *)&__m128_op1[2]) = 0xff84fff4; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xfffffff0; ++ *((int *)&__m128_result[3]) = 0xffc4fff4; ++ *((int *)&__m128_result[2]) = 0xffc4fff4; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xfffffff0; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00007fff; ++ *((int *)&__m128_op1[2]) = 0x00007fff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00007fff; ++ *((int *)&__m128_result[2]) = 0x00007fff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int 
*)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000001; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x01010001; ++ *((int *)&__m128_op0[0]) = 0x01010001; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00020000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00020000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00020000; ++ *((int *)&__m128_result[1]) = 0x01010001; ++ *((int *)&__m128_result[0]) = 0x01010001; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000020; ++ *((int *)&__m128_op1[2]) = 0x00000020; ++ *((int *)&__m128_op1[1]) = 0x0000001f; ++ *((int *)&__m128_op1[0]) = 0x0000001f; ++ *((int *)&__m128_result[3]) = 0x00000020; ++ *((int *)&__m128_result[2]) = 0x00000020; ++ *((int *)&__m128_result[1]) = 0x0000001f; ++ *((int *)&__m128_result[0]) = 0x0000001f; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xf3040705; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xf3040705; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xf3040705; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000004; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000004; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000004; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000004; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000004; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000004; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int 
*)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000ffff; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0x0000ffff; ++ *((int *)&__m128_op0[0]) = 0x0000fffe; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffe5; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffe5; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x13121110; ++ *((int *)&__m128_op0[2]) = 0x1211100f; ++ *((int *)&__m128_op0[1]) = 0x11100f0e; ++ *((int *)&__m128_op0[0]) = 0x100f0e0d; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff3; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000008; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000088; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000008; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000088; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x52525252; 
++ *((int *)&__m128_op0[2]) = 0xadadadad; ++ *((int *)&__m128_op0[1]) = 0x52525252; ++ *((int *)&__m128_op0[0]) = 0xadadadad; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xadadadad; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0xadadadad; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x0000ffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x0000ffff; ++ __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c +new file mode 100644 +index 000000000..c2766d5c6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c +@@ -0,0 +1,155 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000800000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 
0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000af555555555; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000af555555555; ++ *((unsigned long *)&__m128d_result[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128d_result[0]) = 0x3918371635143312; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000010000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x10f8000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128d_result[1]) = 0x10f8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x00000000ff800000; ++ __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x80000000fff6fc00; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000080000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000158; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfffe0004fffe0004; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c +new file mode 100644 +index 000000000..5fcdedd3f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c +@@ -0,0 +1,230 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) 
= 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xf436f3f5; ++ *((int *)&__m128_op0[0]) = 0x2f4ef4a8; ++ *((int *)&__m128_op1[3]) = 0xff800000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0xff800000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xff800000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xff800000; ++ *((int *)&__m128_result[0]) = 0x2f4ef4a8; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000800; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000800; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000800; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000800; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xc0c0c000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00800080; ++ *((int *)&__m128_op1[2]) = 0x00800080; ++ *((int *)&__m128_op1[1]) = 0x0080006b; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00800080; ++ *((int *)&__m128_result[2]) = 0xc0c0c000; ++ *((int *)&__m128_result[1]) = 0x0080006b; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x80000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x80000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ 
*((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xffffffff; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0xffffffff; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0xff01ff01; ++ *((int *)&__m128_op1[2]) = 0x0000ff7d; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x0000fffc; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xdfa6e0c6; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xd46cdc13; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 
0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x01010101; ++ *((int *)&__m128_op0[2]) = 0x01010101; ++ *((int *)&__m128_op0[1]) = 0x010101fe; ++ *((int *)&__m128_op0[0]) = 0x0101fe87; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0xffff0000; ++ *((int *)&__m128_op1[2]) = 0xffff0000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c +new file mode 100644 +index 000000000..96b14aad6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c +@@ -0,0 +1,196 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xef0179a47c793879; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9f9e7e3e9ea3ff41; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffc000007fc00000; ++ *((unsigned long *)&__m128d_result[0]) = 0x9e801ffc7fc00000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000ffff00000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000008800022; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128d_op2[1]) = 0xb8ec43befe38e64b; ++ *((unsigned long *)&__m128d_op2[0]) = 0x6477d042343cce24; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffbfffffffbf; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000060000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffffffffffff000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffafffffffa; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffafffffffa; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128d_op1[0]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff76ffd8ffe6ffaa; ++ *((unsigned long *)&__m128d_op1[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128d_op1[0]) = 0xff76ffd8ffe6ffaa; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0303030303030303; ++ *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000; ++ __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128d_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffffffffffffffe; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long 
*)&__m128d_result[0]) = 0x00000000b5207f80; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000009000900; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op2[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x00000000ffffffff; ++ __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c +new file mode 100644 +index 000000000..bf8414b49 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xfffffffe; ++ *((int *)&__m128_op0[0]) = 0xbe6ed565; ++ *((int *)&__m128_op1[3]) = 0x195f307a; ++ *((int *)&__m128_op1[2]) = 0x5d04acbb; ++ *((int *)&__m128_op1[1]) = 0x6a1a3fbb; ++ *((int *)&__m128_op1[0]) = 0x3c90260e; ++ *((int *)&__m128_op2[3]) = 0xffffffff; ++ *((int *)&__m128_op2[2]) = 0xffffffff; ++ *((int *)&__m128_op2[1]) = 0xfffffffe; ++ *((int *)&__m128_op2[0]) = 
0xbe6ed565; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xfffffffe; ++ *((int *)&__m128_result[0]) = 0x3e730941; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xff01ff01; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0xffffffff; ++ *((int *)&__m128_op2[2]) = 0xffffffff; ++ *((int *)&__m128_op2[1]) = 0xffffffff; ++ *((int *)&__m128_op2[0]) = 0xff01ff01; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0x7f01ff01; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0xffffffff; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0xffffffff; ++ *((int *)&__m128_op2[3]) = 0x00307028; ++ *((int *)&__m128_op2[2]) = 0x003f80b0; ++ *((int *)&__m128_op2[1]) = 0x0040007f; ++ *((int *)&__m128_op2[0]) = 0xff800000; ++ *((int *)&__m128_result[3]) = 0x80307028; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x8040007f; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000049; ++ *((int *)&__m128_op0[2]) = 0x0000004d; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000001; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000001; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000001; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffff0000; ++ *((int *)&__m128_op0[1]) = 0x00ff0000; ++ *((int *)&__m128_op0[0]) = 0x00ff0000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000800; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0xffffffff; ++ *((int *)&__m128_op2[2]) = 0xfffff800; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xfffff800; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, 
__m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00030000; ++ *((int *)&__m128_op0[2]) = 0x00010000; ++ *((int *)&__m128_op0[1]) = 0x00020000; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((int *)&__m128_op1[3]) = 0x3f800000; ++ *((int *)&__m128_op1[2]) = 0x3f800000; ++ *((int *)&__m128_op1[1]) = 0x3f800000; ++ *((int *)&__m128_op1[0]) = 0x3f800000; ++ *((int *)&__m128_op2[3]) = 0x00030000; ++ *((int *)&__m128_op2[2]) = 0x00010000; ++ *((int *)&__m128_op2[1]) = 0x00020000; ++ *((int *)&__m128_op2[0]) = 0x00010000; ++ *((int *)&__m128_result[3]) = 0x80060000; ++ *((int *)&__m128_result[2]) = 0x80020000; ++ *((int *)&__m128_result[1]) = 0x80040000; ++ *((int *)&__m128_result[0]) = 0x80020000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000008; ++ *((int *)&__m128_op0[2]) = 0x97957687; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000408; ++ *((int *)&__m128_op1[3]) = 0x00000008; ++ *((int *)&__m128_op1[2]) = 0x97957687; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000408; ++ *((int *)&__m128_op2[3]) = 0x00010001; ++ *((int *)&__m128_op2[2]) = 0x00010001; ++ *((int *)&__m128_op2[1]) = 0x00010001; ++ *((int *)&__m128_op2[0]) = 0x04000800; ++ *((int *)&__m128_result[3]) = 0x80010001; ++ *((int *)&__m128_result[2]) = 0x80010001; ++ *((int *)&__m128_result[1]) = 0x80010001; ++ *((int *)&__m128_result[0]) = 0x84000800; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffc2ffe7; ++ *((int *)&__m128_op0[2]) = 0x00000007; ++ *((int *)&__m128_op0[1]) = 0x0000ffc1; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((int *)&__m128_op1[3]) = 0xffc2ffe7; ++ *((int *)&__m128_op1[2]) = 0x00000007; ++ *((int *)&__m128_op1[1]) = 0x0000ffc1; ++ *((int *)&__m128_op1[0]) = 0x00010001; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x000ffc2f; ++ *((int *)&__m128_op2[1]) = 0x00201df0; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0xffc2ffe7; ++ *((int *)&__m128_result[2]) = 0x800ffc2f; ++ *((int *)&__m128_result[1]) = 0x80201df0; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; 
++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000005; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80808080; ++ *((int *)&__m128_op0[2]) = 0x80808080; ++ *((int *)&__m128_op0[1]) = 0x80808080; ++ *((int *)&__m128_op0[0]) = 0x80800008; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x31313131; ++ *((int *)&__m128_op0[0]) = 0x31313131; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x31313131; ++ *((int *)&__m128_op1[0]) = 0x31313131; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000008; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000008; ++ *((int *)&__m128_result[1]) = 0xa2f54a1e; ++ *((int *)&__m128_result[0]) = 0xa2f54a1e; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int 
*)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xa486c90f; ++ *((int *)&__m128_op0[2]) = 0x157ca12e; ++ *((int *)&__m128_op0[1]) = 0x58bcc201; ++ *((int *)&__m128_op0[0]) = 0x2e635d65; ++ *((int *)&__m128_op1[3]) = 0x6d564875; ++ *((int *)&__m128_op1[2]) = 0xf8760005; ++ *((int *)&__m128_op1[1]) = 0x8dc5a4d1; ++ *((int *)&__m128_op1[0]) = 0x79ffa22f; ++ *((int *)&__m128_op2[3]) = 0xffffffff; ++ *((int *)&__m128_op2[2]) = 0xd2436487; ++ *((int *)&__m128_op2[1]) = 0x0fa96b88; ++ *((int *)&__m128_op2[0]) = 0x5f94ab13; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xd24271c4; ++ *((int *)&__m128_result[1]) = 0x2711bad1; ++ *((int *)&__m128_result[0]) = 0xe8e309ed; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x00000000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x80000000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x007ffd00; ++ *((int *)&__m128_op2[0]) = 0x01400840; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x007ffd00; ++ *((int *)&__m128_result[0]) = 0x01400840; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0x00000000; ++ *((int *)&__m128_op2[2]) = 0x00000000; ++ *((int *)&__m128_op2[1]) = 0x7f800000; ++ *((int *)&__m128_op2[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x80000000; ++ *((int *)&__m128_result[2]) = 0x80000000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x80000000; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int 
*)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_op1[3]) = 0x00000000; ++ *((int *)&__m128_op1[2]) = 0x00000000; ++ *((int *)&__m128_op1[1]) = 0x00000000; ++ *((int *)&__m128_op1[0]) = 0x00000000; ++ *((int *)&__m128_op2[3]) = 0xcd636363; ++ *((int *)&__m128_op2[2]) = 0xcd636363; ++ *((int *)&__m128_op2[1]) = 0xcd636363; ++ *((int *)&__m128_op2[0]) = 0xcd636363; ++ *((int *)&__m128_result[3]) = 0xcd636363; ++ *((int *)&__m128_result[2]) = 0xcd636363; ++ *((int *)&__m128_result[1]) = 0xcd636363; ++ *((int *)&__m128_result[0]) = 0xcd636363; ++ __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c +new file mode 100644 +index 000000000..c60ff2b46 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c +@@ -0,0 +1,230 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0003000300030003; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0003000700020005; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long 
*)&__m128d_op0[1]) = 0xfd200ed2fd370775; ++ *((unsigned long *)&__m128d_op0[0]) = 0x96198318780e32c5; ++ *((unsigned long *)&__m128d_result[1]) = 0xfd200ed2fd370775; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfrint_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128d_op0[0]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128d_result[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128d_result[0]) = 0xe0404041e0404041; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000868686868686; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrne_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0xfffc002000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9c9c9c9c00000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, 
__m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000007f00ff00ff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x3ff0000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000077af9450; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x3ff0000000000000; ++ __m128d_out = __lsx_vfrintrp_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ffffff02fff4; ++ *((unsigned long *)&__m128d_result[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x6a57a30ff0000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x6a57a30ff0000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff02000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x1f81e3779b97f4a8; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffff02000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrm_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 
(__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x98ff98ff220e220d; ++ *((unsigned long *)&__m128d_op0[0]) = 0xa2e1a2601ff01ff0; ++ *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000abba7980; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000ccf98000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128d_result[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_result[0]) = 0xfe03fe3ffe01fa21; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128d_op0[0]) = 0x110053f401e7cced; ++ *((unsigned long *)&__m128d_result[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfrintrz_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c +new file mode 100644 +index 000000000..12cb02303 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++/* { dg-timeout 500 } */ ++#include "../simd_correctness_check.h" ++#include ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00100010; ++ *((int *)&__m128_op0[2]) = 0x00030000; ++ *((int *)&__m128_op0[1]) = 0x00060002; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 
0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0xca02f854; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x00013fa0; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xca02f854; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000000ad; ++ *((int *)&__m128_op0[2]) = 0x00007081; ++ *((int *)&__m128_op0[1]) = 0x00000351; ++ *((int *)&__m128_op0[0]) = 0x0000b5f2; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ff00ef; ++ *((int *)&__m128_op0[2]) = 0x00ff010f; ++ *((int *)&__m128_op0[1]) = 0x00ff00ff; ++ *((int *)&__m128_op0[0]) = 0x00ff010f; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrint_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00130013; ++ *((int *)&__m128_op0[2]) = 0x00130013; ++ *((int *)&__m128_op0[1]) = 0x00130013; ++ *((int *)&__m128_op0[0]) = 0x00130013; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x20202020; ++ *((int *)&__m128_op0[2]) = 0x20202020; ++ *((int *)&__m128_op0[1]) = 0x20202020; ++ *((int *)&__m128_op0[0]) = 0x20207fff; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x01f50000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int 
*)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020004; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffbfffb; ++ *((int *)&__m128_op0[2]) = 0xfffbfffb; ++ *((int *)&__m128_op0[1]) = 0xfffbfffb; ++ *((int *)&__m128_op0[0]) = 0xfffbfffb; ++ *((int *)&__m128_result[3]) = 0xfffbfffb; ++ *((int *)&__m128_result[2]) = 0xfffbfffb; ++ *((int *)&__m128_result[1]) = 0xfffbfffb; ++ *((int *)&__m128_result[0]) = 0xfffbfffb; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0ff780a1; ++ *((int *)&__m128_op0[2]) = 0x0efc01af; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xfe7f0000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0xfe7f0000; ++ __m128_out = __lsx_vfrintrne_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xefffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0xefffffff; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffff00; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffff00; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffff00; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffff00; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffb96b; ++ *((int *)&__m128_op0[2]) = 0xffff57c9; ++ *((int *)&__m128_op0[1]) 
= 0xffff6080; ++ *((int *)&__m128_op0[0]) = 0xffff4417; ++ *((int *)&__m128_result[3]) = 0xffffb96b; ++ *((int *)&__m128_result[2]) = 0xffff57c9; ++ *((int *)&__m128_result[1]) = 0xffff6080; ++ *((int *)&__m128_result[0]) = 0xffff4417; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ff00ff; ++ *((int *)&__m128_op0[2]) = 0x00ff00ff; ++ *((int *)&__m128_op0[1]) = 0x62cbf96e; ++ *((int *)&__m128_op0[0]) = 0x4acfaf40; ++ *((int *)&__m128_result[3]) = 0x3f800000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x62cbf96e; ++ *((int *)&__m128_result[0]) = 0x4acfaf40; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00002000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x1fe02000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x3f800000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63636363; ++ *((int *)&__m128_op0[2]) = 0x63abdf16; ++ *((int *)&__m128_op0[1]) = 0x41f8e080; ++ *((int *)&__m128_op0[0]) = 0x16161198; ++ *((int *)&__m128_result[3]) = 0x63636363; ++ *((int *)&__m128_result[2]) = 0x63abdf16; ++ *((int *)&__m128_result[1]) = 0x42000000; ++ *((int *)&__m128_result[0]) = 0x3f800000; ++ __m128_out = __lsx_vfrintrp_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrm_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xa5c4c774; ++ *((int *)&__m128_op0[2]) = 0x856ba83b; ++ *((int *)&__m128_op0[1]) = 0x8003caef; ++ *((int *)&__m128_op0[0]) = 0x54691124; ++ *((int *)&__m128_result[3]) = 0xbf800000; ++ *((int *)&__m128_result[2]) = 0xbf800000; ++ *((int *)&__m128_result[1]) = 0xbf800000; ++ *((int *)&__m128_result[0]) = 0x54691124; ++ __m128_out = __lsx_vfrintrm_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00010002; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xff960015; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffd60015; ++ __m128_out = __lsx_vfrintrm_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ 
*((int *)&__m128_op0[2]) = 0x3c992b2e; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffff730f; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffff730f; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000016; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x18171615; ++ *((int *)&__m128_op0[2]) = 0x17161514; ++ *((int *)&__m128_op0[1]) = 0x16151413; ++ *((int *)&__m128_op0[0]) = 0x15141312; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x62cbf96e; ++ *((int *)&__m128_op0[2]) = 0x4acfaf40; ++ *((int *)&__m128_op0[1]) = 0xf0bc9a52; ++ *((int *)&__m128_op0[0]) = 0x78285a4a; ++ *((int *)&__m128_result[3]) = 0x62cbf96e; ++ *((int *)&__m128_result[2]) = 0x4acfaf40; ++ *((int *)&__m128_result[1]) = 0xf0bc9a52; ++ *((int *)&__m128_result[0]) = 0x78285a4a; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfrintrz_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c +new file mode 100644 +index 000000000..ac0ade8b1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c +@@ -0,0 +1,218 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020100fedd0c00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0005000501800005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020100fedd0008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op2[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000003f803f4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000003f803f4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001003f803f4; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000020000007d; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000746400016388; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000586100015567; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000020000007d; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x61608654a2d4f6da; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ff08ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x36fbdfdcffdcffdc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_op2[1]) = 0x1f1f1f1f1f1f1f00; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1f1f1f27332b9f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x36fbdfdcffdc0008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000aaaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000545cab1d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000081a83bea; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00d3007c014e00bd; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000aaaa; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003a0000003a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x37c0001000000008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080800008; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00081f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000400080003fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bc2000007e10; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400080003fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bc2000007e04; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a753500950fa306; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff14eb54ab; ++ *((unsigned long *)&__m128i_op1[0]) = 0x14ea6a002a406a00; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a753500950fa306; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8140001; ++ __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000155; 
++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff100000000000; ++ __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c +new file mode 100644 +index 000000000..a2b110f21 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c +@@ -0,0 +1,209 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0027002a00030018; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f4300177f7a7f59; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0027002a00080018; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f4300177f7a7f59; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000007f00000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000110000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000004; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x75b043c4d17db125; ++ *((unsigned long *)&__m128i_op0[0]) = 0xeef8227b4f8017b1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x75b043c4007db125; ++ *((unsigned long *)&__m128i_result[0]) = 0xeef8227b4f8017b1; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000010a000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ffff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000010a000b; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5b35342c979955da; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000009; ++ *((unsigned long *)&__m128i_result[0]) = 0x5b35342c970455da; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000000000000; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00d3012b015700bb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001002affca0070; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012b015700bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x00010000ffca0070; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00080000fffe0001; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000545cffffab1d; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff81a800003bea; ++ *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000545cffff0001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff81a800003bea; ++ __m128i_out 
= __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001b; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x379674c000000000; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a0008; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a000b; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x02f3030303030303; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x02f3030303100303; ++ __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff941d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007770ffff941d; ++ __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c +new file mode 100644 +index 000000000..8a35dfe24 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c +@@ -0,0 +1,216 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffa486c90f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000058bcc201; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffa486c90f; ++ *((unsigned long *)&__m128d_result[0]) = 0x1f52d710bf295626; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffff01ff01; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000be00be; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x1f1b917c9f3d5e05; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x1f81e3779b97f4a8; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128d_op0[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128d_result[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128d_result[0]) = 0x2006454690d3de87; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128d_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001ffff00000000; ++ 
*((unsigned long *)&__m128d_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128d_result[1]) = 0x5ff6a0a40ea8f47c; ++ *((unsigned long *)&__m128d_result[0]) = 0x5ff6a0a40e9da42a; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x61608654a2d4f6da; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128d_result[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long *)&__m128d_result[0]) = 0x7fff7fff7fff7fff; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrsqrt_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000fffa0000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffa0000; ++ *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128d_result[1]) = 0x805ffffe01001fe0; ++ *((unsigned long *)&__m128d_result[0]) = 0x9a49e11102834d70; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128d_op0[0]) = 0x5252dcdcdcdcdcdc; ++ *((unsigned long *)&__m128d_result[1]) = 0x2d8bf1f8fc7e3f20; ++ *((unsigned long *)&__m128d_result[0]) = 0x2d8b24b936d1b24d; ++ __m128d_out = __lsx_vfrecip_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c +new file mode 100644 +index 000000000..ffd80540b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c +@@ -0,0 +1,372 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0xfe07e5fe; ++ *((int *)&__m128_op0[2]) = 0xfefdddfe; ++ *((int *)&__m128_op0[1]) = 0x00020100; ++ *((int *)&__m128_op0[0]) = 0xfedd0c00; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int *)&__m128_result[1]) = 0x1e801ffc; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff00ff00; ++ *((int *)&__m128_op0[2]) = 0xff00ff00; ++ *((int *)&__m128_op0[1]) = 0xff00ff00; ++ *((int *)&__m128_op0[0]) = 0xff00ff00; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x7fc00000; ++ *((int 
*)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x8c7fc73a; ++ *((int *)&__m128_op0[2]) = 0x137e54af; ++ *((int *)&__m128_op0[1]) = 0xbc84cf6f; ++ *((int *)&__m128_op0[0]) = 0x76208329; ++ *((int *)&__m128_result[3]) = 0x7fc00000; ++ *((int *)&__m128_result[2]) = 0x297f29fe; ++ *((int *)&__m128_result[1]) = 0x7fc00000; ++ *((int *)&__m128_result[0]) = 0x5acab5a5; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffff9727; ++ *((int *)&__m128_op0[2]) = 0xffff9727; ++ *((int *)&__m128_op0[1]) = 0xfffffe79; ++ *((int *)&__m128_op0[0]) = 0xffffba5f; ++ *((int *)&__m128_result[3]) = 0xffff9727; ++ *((int *)&__m128_result[2]) = 0xffff9727; ++ *((int *)&__m128_result[1]) = 0xfffffe79; ++ *((int *)&__m128_result[0]) = 0xffffba5f; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x00000000; ++ *((int *)&__m128_result[2]) = 0x00000000; ++ *((int *)&__m128_result[1]) = 0x00000000; ++ *((int *)&__m128_result[0]) = 0x00000000; ++ __m128_out = __lsx_vfsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xfff8fff8; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0xfff80000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0xfff8fff8; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0xfff80000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ 
*((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0xffffffff; ++ *((int *)&__m128_result[0]) = 0xffffffff; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x1f1b917c; ++ *((int *)&__m128_op0[0]) = 0x9f3d5e05; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x4fa432d6; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x12835580; ++ *((int *)&__m128_op0[0]) = 0xb880eb98; ++ *((int *)&__m128_result[3]) = 0xffffffff; ++ *((int *)&__m128_result[2]) = 0xffffffff; ++ *((int *)&__m128_result[1]) = 0x55fcbad1; ++ *((int *)&__m128_result[0]) = 0x7fc00000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x06070607; ++ *((int *)&__m128_op0[2]) = 0x00000807; ++ *((int *)&__m128_op0[1]) = 0x0707f8f8; ++ *((int *)&__m128_op0[0]) = 0x03e8157e; ++ *((int *)&__m128_result[3]) = 0x5c303f97; ++ *((int *)&__m128_result[2]) = 0x61ff9049; ++ *((int *)&__m128_result[1]) = 0x5bafa1dd; ++ *((int *)&__m128_result[0]) = 0x5d3e1e1d; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 
0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfff7fffe; ++ *((int *)&__m128_op0[2]) = 0xfffa01ff; ++ *((int *)&__m128_op0[1]) = 0xfffbfffe; ++ *((int *)&__m128_op0[0]) = 0xfffe01ff; ++ *((int *)&__m128_result[3]) = 0xfff7fffe; ++ *((int *)&__m128_result[2]) = 0xfffa01ff; ++ *((int *)&__m128_result[1]) = 0xfffbfffe; ++ *((int *)&__m128_result[0]) = 0xfffe01ff; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x45000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x44000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x3cb504f3; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x3d3504f3; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020001; ++ *((int *)&__m128_op0[0]) = 0x00020002; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x607fffc0; ++ *((int *)&__m128_result[0]) = 0x607fff80; ++ __m128_out = __lsx_vfrsqrt_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000002; ++ *((int *)&__m128_op0[2]) = 0x00000002; ++ *((int *)&__m128_op0[1]) = 0x00000003; ++ *((int *)&__m128_op0[0]) = 0x00000003; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf6e91c00; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x51cfd7c0; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x880c91b8; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x2d1da85b; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int 
*)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffffffa; ++ *((int *)&__m128_op0[2]) = 0xfffffffa; ++ *((int *)&__m128_op0[1]) = 0xfffffffa; ++ *((int *)&__m128_op0[0]) = 0xfffffffa; ++ *((int *)&__m128_result[3]) = 0xfffffffa; ++ *((int *)&__m128_result[2]) = 0xfffffffa; ++ *((int *)&__m128_result[1]) = 0xfffffffa; ++ *((int *)&__m128_result[0]) = 0xfffffffa; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffff0001; ++ *((int *)&__m128_op0[2]) = 0xffff0001; ++ *((int *)&__m128_op0[1]) = 0xffff0001; ++ *((int *)&__m128_op0[0]) = 0xffff0001; ++ *((int *)&__m128_result[3]) = 0xffff0001; ++ *((int *)&__m128_result[2]) = 0xffff0001; ++ *((int *)&__m128_result[1]) = 0xffff0001; ++ *((int *)&__m128_result[0]) = 0xffff0001; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0a000000; ++ *((int *)&__m128_op0[2]) = 0x0a000000; ++ *((int *)&__m128_op0[1]) = 0x0a000000; ++ *((int *)&__m128_op0[0]) = 0x0a000000; ++ *((int *)&__m128_result[3]) = 0x75000000; ++ *((int *)&__m128_result[2]) = 0x75000000; ++ *((int *)&__m128_result[1]) = 0x75000000; ++ *((int *)&__m128_result[0]) = 0x75000000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((int *)&__m128_result[3]) = 0x7f800000; ++ *((int *)&__m128_result[2]) = 0x7f800000; ++ *((int *)&__m128_result[1]) = 0x7f800000; ++ *((int *)&__m128_result[0]) = 0x7f800000; ++ __m128_out = __lsx_vfrecip_s (__m128_op0); ++ ASSERTEQ_32 (__LINE__, __m128_result, __m128_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c +new file mode 100644 +index 000000000..8d0d56632 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c +@@ -0,0 +1,349 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long 
int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x004f1fcfd01f9f9f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x9c7c266e3faa293c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vftint_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000015d926c7; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000e41b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000777777777777; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffff7777ffff7777; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000004000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_l_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128d_op0[0]) = 0x00000000b5207f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xc0f3fa0080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffec060;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00012c8a0000a58a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
+new file mode 100644
+index 000000000..5dba807f6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
+@@ -0,0 +1,695 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1,
fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00d4ccb8; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00124888; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfff00000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfff00000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80000000; ++ *((int *)&__m128_op0[2]) = 0xffffd860; ++ *((int *)&__m128_op0[1]) = 0x7fffffff; ++ *((int *)&__m128_op0[0]) = 0x80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00008000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00008000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff80ffff; ++ *((int *)&__m128_op0[2]) = 
0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7ffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x4f804f80; ++ *((int *)&__m128_op0[0]) = 0x4f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0000007b; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000600; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x3f800000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x04870ba0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00009c7c; ++ *((int *)&__m128_op0[0]) = 0x00007176; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0667ae56; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftinth_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x887c8beb; ++ *((int *)&__m128_op0[2]) = 0x969e00f2; ++ *((int *)&__m128_op0[1]) = 0x101f8b68; ++ *((int *)&__m128_op0[0]) = 0x0b6f8095; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00020000; ++ *((int *)&__m128_op0[2]) = 0x00020000; ++ *((int *)&__m128_op0[1]) = 0x000001fc; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020000; ++ *((int *)&__m128_op0[0]) = 0xffff0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0a752a55; ++ *((int *)&__m128_op0[1]) = 0x0a753500; ++ *((int *)&__m128_op0[0]) = 0xa9fa0d06; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrnel_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fffffff; ++ *((int *)&__m128_op0[2]) = 0x7fffffff; ++ *((int *)&__m128_op0[1]) = 0x7fffffff; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000d0254; ++ *((int *)&__m128_op0[2]) = 0x0000007e; ++ *((int *)&__m128_op0[1]) = 0x00000014; ++ *((int *)&__m128_op0[0]) = 0x00140014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x39412488; ++ *((int *)&__m128_op0[0]) = 0x80000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000014; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x2e34594c; ++ *((int *)&__m128_op0[0]) = 0x3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrpl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 
0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7ffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x7ff000ff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrml_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00ff00ff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffefffe; ++ *((int *)&__m128_op0[2]) = 0xfffeffff; ++ *((int *)&__m128_op0[1]) = 0xfffefffe; ++ *((int *)&__m128_op0[0]) = 0xfffeffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000033a; ++ *((int *)&__m128_op0[2]) = 0x0bde0853; ++ *((int *)&__m128_op0[1]) = 0x0a960e6b; ++ *((int *)&__m128_op0[0]) = 0x0a4f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzl_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7ffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfffffffe; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0x7ffeffff; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x7ffeffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrneh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x80808080; ++ *((int *)&__m128_op0[0]) = 0x80638063; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrph_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrph_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000080; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrph_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrph_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ff00ff; ++ *((int *)&__m128_op0[2]) = 0x00ff00ff; ++ *((int *)&__m128_op0[1]) = 0x62cbf96e; ++ *((int *)&__m128_op0[0]) = 0x4acfaf40; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x0000ac26; ++ *((int *)&__m128_op0[1]) = 0x00ff0000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrmh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x6420e020; ++ *((int *)&__m128_op0[2]) = 0x8400c4e3; ++ *((int *)&__m128_op0[1]) = 0x20c4e0c4; ++ *((int *)&__m128_op0[0]) = 0xe0da6499; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrzh_l_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfbffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7bffffff; ++ *((int 
*)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x18171615;
++  *((int *)&__m128_op0[2]) = 0x17161514;
++  *((int *)&__m128_op0[1]) = 0x16151413;
++  *((int *)&__m128_op0[0]) = 0x15141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x046a09ec;
++  *((int *)&__m128_op0[0]) = 0x009c0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x4f4f4f4f;
++  *((int *)&__m128_op0[2]) = 0x4f4f4f4f;
++  *((int *)&__m128_op0[1]) = 0x4f4f4f4f;
++  *((int *)&__m128_op0[0]) = 0x4f4f4f4f;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000cf4f4f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000cf4f4f00;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
+new file mode 100644
+index 000000000..7f6d2f4d1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
+@@ -0,0 +1,1028 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128d_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000ffff; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x0000ffff; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0xfffffffe; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0xfffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00040100; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000080; ++ *((unsigned long 
*)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0xfffffffe; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0xfffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000f0080000f800; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000f0080000f800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffd30000; ++ *((int *)&__m128_op0[2]) = 0x00130000; ++ *((int *)&__m128_op0[1]) = 0xffd30000; ++ *((int *)&__m128_op0[0]) = 0x00130000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xe1000000; ++ *((int *)&__m128_op0[2]) = 0x4deb2610; ++ *((int *)&__m128_op0[1]) = 0xe101e001; ++ *((int *)&__m128_op0[0]) = 0x4dec4089; ++ *((unsigned long *)&__m128i_result[1]) = 0x800000001d64c200; ++ *((unsigned long *)&__m128i_result[0]) = 0x800000001d881120; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; 
++ *((int *)&__m128_op0[1]) = 0x76f42488; ++ *((int *)&__m128_op0[0]) = 0x80000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0202f5f80000ff00; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x003fffc0; ++ *((int *)&__m128_op0[2]) = 0xffc0003f; ++ *((int *)&__m128_op0[1]) = 0xffc0ffc0; ++ *((int *)&__m128_op0[0]) = 0x003f003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffff7fffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x42652524; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003900000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff00ff7f; ++ 
*((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x7f800000; ++ *((int *)&__m128_op0[1]) = 0x2d1da85b; ++ *((int *)&__m128_op0[0]) = 0x7f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fffffff; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x80307028; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x8040007f; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128d_op1[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128d_op1[0]) = 0x000000004fc04f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000003a0000003a; ++ *((unsigned long *)&__m128d_op1[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000068; ++ *((unsigned long *)&__m128d_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128d_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x4429146a7b4c88b2; ++ *((unsigned long *)&__m128d_op0[0]) = 0xe22b3595efa4aa0c; ++ *((unsigned long 
*)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffffff5; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0xe7e5560400010001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xe7e5dabf00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x03050302; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x03010302; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000600007fff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000008ffffa209; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x046a09ec009c0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); 
++ ++ *((int *)&__m128_op0[3]) = 0x000aa822; ++ *((int *)&__m128_op0[2]) = 0xa79308f6; ++ *((int *)&__m128_op0[1]) = 0x03aa355e; ++ *((int *)&__m128_op0[0]) = 0x1d37b5a1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffff00; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrne_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00001802; ++ *((int *)&__m128_op0[0]) = 0x041b0013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x004200a000200000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0001000101fd01fe; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff80ffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0x7ffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0804080407040804; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0804080407040804; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00010001; ++ *((int *)&__m128_op0[2]) = 0x00010001; ++ *((int *)&__m128_op0[1]) = 0x00010001; ++ *((int *)&__m128_op0[0]) = 0x00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000003ffda00f3; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000003ffda00f3; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xfffffadf; ++ *((int *)&__m128_op0[2]) = 0xfedbfefe; ++ *((int *)&__m128_op0[1]) = 0x5f5f7bfe; ++ *((int *)&__m128_op0[0]) = 0xdefb5ada; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffa6ff91fdd8ef77; ++ *((unsigned long *)&__m128d_op0[0]) = 0x061202bffb141c38; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfefffffffed08f77; ++ *((unsigned long *)&__m128d_op1[0]) = 0x8160cdd2f365ed0d; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000001; ++ *((int *)&__m128_op0[2]) = 0x084314a6; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x084314a6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x3f413f4100000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128d_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128d_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x3a800000; ++ *((int *)&__m128_op0[2]) = 
0x3a800000; ++ *((int *)&__m128_op0[1]) = 0x000ef000; ++ *((int *)&__m128_op0[0]) = 0x0000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vftintrp_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x10404000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x09610001; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x0000001a; ++ *((int *)&__m128_op0[2]) = 0xfffffff7; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 
0x000000000202fe02; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op1[0]) = 0xffff00fc0000ff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00040004; ++ *((int *)&__m128_op0[2]) = 0x00040004; ++ *((int *)&__m128_op0[1]) = 0x00040004; ++ *((int *)&__m128_op0[0]) = 0x00040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00ffff00; ++ *((int *)&__m128_op0[2]) = 0xff00ff00; ++ *((int *)&__m128_op0[1]) = 0x00ffff00; ++ *((int *)&__m128_op0[0]) = 0xff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x013ec13e; ++ *((int *)&__m128_op0[1]) = 0xc03fc03f; ++ *((int *)&__m128_op0[0]) = 0xc0ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffdfffffff8; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7ffffffb; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x43800000; ++ *((int *)&__m128_op0[0]) = 0x43800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000014; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrm_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x80307028ffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8040007fffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff84fff4; ++ *((int *)&__m128_op0[2]) = 0xff84fff4; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x7fff0007e215b122; ++ *((unsigned long *)&__m128d_op1[0]) = 0x7ffeffff7bfff828; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x07ffc000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffff0000; ++ *((int *)&__m128_op0[0]) = 0x0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xf039b8c0; ++ *((int *)&__m128_op0[2]) = 0xc61e81ef; ++ *((int *)&__m128_op0[1]) = 0x6db7da53; ++ *((int *)&__m128_op0[0]) = 0xfbd2e34b; ++ *((unsigned long *)&__m128i_result[1]) = 0x80000000ffffd860; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00020000; ++ *((int *)&__m128_op0[0]) = 0xffff0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00027113; ++ *((int *)&__m128_op0[2]) = 0x50a27112; ++ *((int *)&__m128_op0[1]) = 0x00d57017; ++ *((int *)&__m128_op0[0]) = 0x94027113; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xff80ff80; ++ *((int *)&__m128_op0[2]) = 0x7e017f01; ++ *((int *)&__m128_op0[1]) = 0x7f3b7f3f; ++ *((int *)&__m128_op0[0]) = 0x7f3b7f21; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vftintrz_w_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m128d_op0[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128d_op1[1]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128d_op1[0]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c +new file mode 100644 +index 000000000..9c5bb9131 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c +@@ -0,0 +1,345 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((int *)&__m128_op0[3]) = 0x0000c77c; ++ *((int *)&__m128_op0[2]) = 0x000047cd; ++ *((int *)&__m128_op0[1]) = 0x0000c0f1; ++ *((int *)&__m128_op0[0]) = 0x00006549; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0xffffffff; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xffffffff; ++ *((int *)&__m128_op0[0]) = 0xfffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0xffffffff; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0xffffffee; ++ *((int *)&__m128_op0[0]) = 0x00000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x0000ffff; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x63636363; ++ *((int *)&__m128_op0[2]) = 0x63636363; ++ *((int *)&__m128_op0[1]) = 0x63636363; ++ *((int *)&__m128_op0[0]) = 0x63636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vftint_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0xfffffffe; ++ *((int *)&__m128_op0[0]) = 0xffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x02020004; ++ *((int *)&__m128_op0[2]) = 0x02020202; ++ *((int *)&__m128_op0[1]) = 0x00002000; ++ *((int *)&__m128_op0[0]) = 0x00010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x7fff7fff; ++ *((int *)&__m128_op0[2]) = 0x7fff7fff; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x0000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x000000ff; ++ *((int *)&__m128_op0[2]) = 0x808000ff; ++ *((int *)&__m128_op0[1]) = 0x000000ff; ++ *((int *)&__m128_op0[0]) = 0x808000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x7f7f7f7f; ++ *((int *)&__m128_op0[1]) = 0x00000001; ++ *((int *)&__m128_op0[0]) = 0x00000010; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00002000; ++ *((int *)&__m128_op0[2]) = 0x00002000; ++ *((int *)&__m128_op0[1]) = 0x10000000; ++ *((int *)&__m128_op0[0]) = 0x10000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000001; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x67eb85af; ++ *((int *)&__m128_op0[2]) = 0xb2ebb000; ++ *((int *)&__m128_op0[1]) = 0xc8847ef6; ++ *((int *)&__m128_op0[0]) = 0xed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((int *)&__m128_op0[3]) = 0x00000000; ++ *((int *)&__m128_op0[2]) = 0x00000000; ++ *((int *)&__m128_op0[1]) = 0x00000000; ++ *((int *)&__m128_op0[0]) = 0x00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_wu_s (__m128_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000400000007004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x6a57a30ff0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; 
++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x000000009c83e21a; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000022001818; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftint_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x800000001d64c200; ++ *((unsigned long *)&__m128d_op0[0]) = 0x800000001d881120; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long *)&__m128d_op0[0]) = 0x000000016fff9dff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128d_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128d_op0[0]) = 0x03fc03fc03fc03fc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c +new file mode 100644 +index 000000000..af75f8e4e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c +@@ -0,0 +1,488 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, 
__m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4ee85545068f3133; ++ *((unsigned long *)&__m128i_op0[0]) = 0x870968c1f56bb3cd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x004e005500060031; ++ *((unsigned long *)&__m128i_result[0]) = 0xff870068fff5ffb3; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff082f000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc04d600d3aded151; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x004cff8fffde0051; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f0000ffffffff; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001fffc0001fffc; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000750500006541; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000100fffffefd; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00ffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffe000000f6; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x4050000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000f80007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000f8; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffffffe; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xce9035c49ffff570; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0xce9035c49ffff574; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000454ffff9573; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80007fc000003f00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7d187e427c993f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7500000075000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7500000075000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffff800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007d1800007c99; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5555000054100000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5555000154100155; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000155; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffebe6ed565; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffebe6ed565; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffbe6ed563; ++ __m128i_out = 
__lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000078c00000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7d3ac60000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007d3ac600; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff82bb9784; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffc6bb97ac; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff82bb9784; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc6bb97ac; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000003effff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000003effff; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffff359f358; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000e2e36363; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000063636363; ++ __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff02000200; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe00001ffe200; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000383; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe400000003ffc001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe000ffff2382; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e39e496cbc9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x03574e38e496cbc9; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xabff54e911f71b07; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xa9ec4882f216ea11; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xaa0051e90ff91808; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe00000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff9514; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c9c9c9c9c; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000400000001; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ff807e017f01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f3b7f3f7f3b7f21; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0a0000001e000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0a000000f6000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0980ff8174017f01; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000ef0000000003b; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000011ff040; ++ __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c +new file mode 100644 +index 000000000..37c769a2d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c +@@ -0,0 +1,452 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff0000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0052005200520052; ++ *((unsigned long *)&__m128i_result[0]) = 0x0052005200520052; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff000000ff; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060012000e002b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000049ffffffaa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000127fffffea; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001201fe01e9; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f008000ea007f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00009f0000009f00; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000007f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000000; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128i_op1[0]) = 0x28bf0351ec69b5f2; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ad00007081; ++ *((unsigned long *)&__m128i_result[0]) = 0x000003510000b5f2; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5252adadadadadad; ++ *((unsigned long *)&__m128i_op1[0]) = 0xadad52525252adad; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000adad0000adad; ++ *((unsigned long *)&__m128i_result[0]) = 0x000052520000adad; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff8000010f78; ++ *((unsigned long *)&__m128i_op1[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001a0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7500000075007500; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007d1800007c99; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f50000007500; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007e1600007d98; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff760386bdae46; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc1fc7941bc7e00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0802080408060803; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff000086bd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ca000000c481; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000007fff9; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000235600005486; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000b31600006544; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c83e21a22001818; ++ *((unsigned long *)&__m128i_op0[0]) = 0xdd3b8b02563b2d7b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00012c8a0000a58a; ++ __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001a8beed86; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010024d8f5; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000078c00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6a57a30ff0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000f0000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80000000b57ec564; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000083ff0be0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001b57ec563; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000183ff0bdf; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000e2e3ffffd1d3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200010002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200010002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4ee85545ffffffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x870968c1f56bb3cd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x870968c1f56bb3cd; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000013d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0006000200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000001b0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000001b0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001b001b; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000010000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000010000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff8000010f800000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c +new file mode 100644 +index 000000000..0b51cb8cf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c +@@ -0,0 +1,327 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffc00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff07effffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100110002; ++ 
__m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffeff400000df4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ff91fffffff5; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00650001ffb0; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000bfffffffe0f6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010001000a; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x41dfffffffc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0039ffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffbeffffffffffff; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0037ffdfffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0037ffdfffeb007f; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4e3e133738bb47d2; ++ *((unsigned long *)&__m128i_result[1]) = 0xff98007a004d0050; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff9ff4a0057000e; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000501ffff0005; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000600000001; ++ __m128i_out = __lsx_vhsubw_h_b 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020000ffff0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000001; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_result[1]) = 0xffaeffadffaeffad; ++ *((unsigned long *)&__m128i_result[0]) = 0xffaeffadffaeffad; ++ __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff02; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff01; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9727b8499727b849; ++ *((unsigned long *)&__m128i_op0[0]) = 0x12755900b653f081; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7d7f13fc7c7ffbf4; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff9727ffff9727; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffe79ffffba5f; ++ __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000100010; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000100c6ffef10c; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff70; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff9001a47e; ++ __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff59; ++ __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x343d8dc5b0ed5a08; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffe00006aea; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000003fe0000141e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffc01ffffebe2; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x67eb8590b2ebafe1; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128i_op1[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4b47edd10bfab44d; ++ __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c +new file mode 100644 +index 000000000..26b51ee14 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c +@@ -0,0 +1,353 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe00fe00fe00fd01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fffefe0100f6; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff0001ffffff0a; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000017161515; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000095141311; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x76f424887fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000170014; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff7cffd6ffc700b0; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff0001; ++ 
__m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010100000101; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007e7e00007e7e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007e7e00007e7e; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2e3a36363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa2e3a36463636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000a2e300006363; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000a2e300006363; ++ __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000052527d7d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000052527d7d; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002400180004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000024; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xabff54f1ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa5f7458b000802ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fff7fc01; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000002; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000002; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80008a7555aa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a7535006af05cf9; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff758aaa56; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffa9fb0d07; ++ __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0f180000ffe00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ca02f854; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004b01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffb4ff; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe4b5ffff87f8; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f1fd800000004; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc03fc000000004; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc080800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc080800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x418181017dfefdff; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x67eb85afb2ebb000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff8000010f78; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff7f0080ff7ef088; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000155; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffff10000; ++ __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c +new file mode 100644 +index 000000000..aa802b295 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c +@@ -0,0 +1,353 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, 
int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007fffff00000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6a1a3fbb3c90260e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8644000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaed495f03343a685; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbe6ed563; ++ *((unsigned long *)&__m128i_result[1]) = 0x8644ffff0000ffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000ffff0000fffe; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000e13; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000e13; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a00000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000004f804f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000004f804f80; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x004f0080004f0080; ++ *((unsigned long *)&__m128i_result[0]) = 0x004f0080004f0080; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ffa7f8ff81; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003f0080ffc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007fff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000a7f87fffff81; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000080003f80ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x202020202020ff20; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2000200020002000; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808ffff0808ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808ffff0808ffff; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000157; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002008360500088; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000f3040705; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c +new file mode 100644 +index 000000000..88c66f220 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c +@@ -0,0 +1,327 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffcff; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7404443064403aec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000d6eefefc0498; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff7f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2d1da85b7f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x002d001dd6a8ee5b; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe7ffc8004009800; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000007f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001e8e1d8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000e400000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001e8e1d8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000e400000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000e4e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000101; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffe0; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80005613; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007f800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000807f80808000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80006b0000000b00; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080808000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080006b0000000b; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0808000c0808000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xc080800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc080800000000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007ffff001000300; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0001000300; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x8); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0014001400140000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400000000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000009c007c00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000071007600; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000060002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000060002; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe4c8b96e2560afe9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc001a1867fffa207; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000c0010000a186; ++ *((unsigned long *)&__m128i_result[0]) = 0x00067fff0002a207; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000014414104505; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1011050040004101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000014414104505; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1011050040004101; ++ *((unsigned long *)&__m128i_result[1]) = 0x1010111105050000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040000041410101; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffac5cffffac5c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffac5cffffac5c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x010169d9010169d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010287010146a1; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ff01ac025c87; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01ff01ac465ca1; ++ __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff01ff01ac025c87; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ff0100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xac465ca100000000; ++ __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee; ++ *((unsigned long *)&__m128i_result[1]) = 0xf8e10000a03a0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff2427e3e2c2ee; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffe4ffe4ffe4ffe4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffe4ffe4ffe4ffe4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01e41ffff0e440; ++ 
__m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ffffe41f0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff00000ffff0000; ++ __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c +new file mode 100644 +index 000000000..2b9dcc0b5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c +@@ -0,0 +1,278 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000007942652524; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4265252400000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_op1 = 0x0000007942652524; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff2524ffffffff; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vinsgr2vr_d 
(__m128i_op0, long_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5d5d5d5d5d5d5d55; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x5d5d5d005d5d5d55; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1); ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020202020; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x202020202020ff20; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x00fe01fc0005fff4; ++ int_op1 = 0x0000000020202020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000820202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe01fc0005fff4; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffbfffffffbf; ++ long_op1 = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003a24; ++ __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ef8000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ef8000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ef8000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ long_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001000; ++ int_op1 = 0x000000007ff00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000020006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000020006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000600; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); 
++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f1f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000001f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_op1 = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ffffff0000; ++ __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04faf60009f5f092; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04fafa9200000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x04faf600fff5f092; ++ *((unsigned long *)&__m128i_result[0]) = 0x04fafa9200000000; ++ __m128i_out = 
__lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c +new file mode 100644 +index 000000000..7cd9abb7c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c +@@ -0,0 +1,62 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vld ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vldx ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0xc3c3c3c3c3c3c3c3; ++ *((unsigned long *)&__m128i_result[0]) = 0xc3c3c3c3c3c3c3c3; ++ __m128i_out = __lsx_vldrepl_b ((unsigned long *)&__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0xc31ac31ac31ac31a; ++ *((unsigned long *)&__m128i_result[0]) = 0xc31ac31ac31ac31a; ++ __m128i_out = __lsx_vldrepl_h ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x47a5c31a47a5c31a; ++ *((unsigned long *)&__m128i_result[0]) = 0x47a5c31a47a5c31a; ++ __m128i_out = __lsx_vldrepl_w ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 
0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a; ++ __m128i_out = __lsx_vldrepl_d ((unsigned long *)&__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c +new file mode 100644 +index 000000000..089500ea9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c +@@ -0,0 +1,61 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x00a300a300a300a3; ++ *((unsigned long *)&__m128i_result[0]) = 0x00a300a300a300a3; ++ __m128i_out = __lsx_vldi (1187); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffe15; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe15; ++ __m128i_out = __lsx_vldi (3605); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0xecececececececec; ++ *((unsigned long *)&__m128i_result[0]) = 0xecececececececec; ++ __m128i_out = __lsx_vldi (1004); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff00ff00ff00; ++ __m128i_out = __lsx_vldi (-1686); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_result[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_result[0]) = 0x004d004d004d004d; ++ __m128i_out = __lsx_vldi (1101); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0000000a000000; ++ __m128i_out = __lsx_vldi (-3318); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff00ff00ff00; ++ __m128i_out = __lsx_vldi (-1686); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0000000a000000; ++ __m128i_out = __lsx_vldi (-3318); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c +new file mode 100644 +index 000000000..3fade5157 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c +@@ -0,0 +1,450 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, 
__m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffa486c90f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1f52d710bf295626; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff7f01ff01; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfa31dfa21672e711; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1304db85e468073a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x887c8beb969e00f2; ++ *((unsigned long *)&__m128i_op2[0]) = 0x101f8b680b6f8095; ++ *((unsigned long *)&__m128i_result[1]) = 0x7582ed22cb1c6e12; ++ *((unsigned long *)&__m128i_result[0]) = 0x35aaa61c944f34c2; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_result[0]) = 0x5252525252525252; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xc); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000400040004002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_result[0]) = 0xf10cf508f904fd01; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb080ffffb080; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffb080ffffb080; ++ *((unsigned long *)&__m128i_op2[1]) = 0x004fcfcfd01f9f9f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x9f4fcfcfcf800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3504b5fd2dee1f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x4676f70fc0000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf7f7f7f7f7f7fbff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long *)&__m128i_result[0]) = 0xf7f7f7f7f7f7fbff; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020; 
++ *((unsigned long *)&__m128i_op1[1]) = 0x0fbc1df53c1ae3f9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff820f81; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xf144e32bc4e61d27; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000020017ef19f; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000004b01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000004b01; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf001f0010101f002; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007f41; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000000000001; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, 
__m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01ff020000ff03ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01346b8d00b04c5a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x01ff020000ff03ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x01346b8d00b04c5a; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080808000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080808000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000455555555; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 
0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f00ff00ff00fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xdcec560380000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x08ec7f7f80000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_op2[1]) = 0x32d8f0a905b6c59b; ++ *((unsigned long *)&__m128i_op2[0]) = 0x322a52fc2ba83b96; ++ *((unsigned long *)&__m128i_result[1]) = 0xaa14efac3bb62636; ++ *((unsigned long *)&__m128i_result[0]) = 0xd6c22c8353a80d2c; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op2[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff000000001f1f00; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x9727b8499727b849; ++ *((unsigned long *)&__m128i_op2[0]) = 0x12755900b653f081; ++ *((unsigned long *)&__m128i_result[1]) = 0x00060fbf00040fbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020fbf00000fbf; ++ __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000021100000211; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfb141d31fb141d31; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op2[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op2[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0674c886fcba4e98; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x002a05a2f059094a; ++ *((unsigned long *)&__m128i_op2[0]) = 0x05ad3ba576eae048; ++ *((unsigned long *)&__m128i_result[1]) = 0xd4a6cc27d02397ce; ++ *((unsigned long *)&__m128i_result[0]) = 0x24b85f887e903abe; ++ __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000007020701; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000007010701; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000008680f1ff; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636463abdf17; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363636463abdf17; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e08016161198; ++ __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x17c64aaef639f093; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op2[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000; ++ __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c +new file mode 100644 +index 000000000..d3fd83da7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c +@@ -0,0 +1,472 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000036de0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003be14000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffff7a53; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001f0000; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000cdc1; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128i_op2[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long *)&__m128i_op2[0]) = 0xcd1de80217374041; ++ *((unsigned long *)&__m128i_result[1]) = 0xf490ee600180ce20; ++ *((unsigned long *)&__m128i_result[0]) = 0x063bff74fb46e356; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op2[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op2[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0fff0fff0fff0fff; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363636363636363; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe00029f9f6061; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f5ec0a0feefa0b0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffe00029fb060b1; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op1[0]) = 0x39c51f389c0d6112; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff0001ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ff9b0082; ++ *((unsigned long *)&__m128i_result[0]) = 0x003a0037fff2fff8; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_result[0]) = 0x05fafe0101fe000e; ++ __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff82bb9784; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc6bb97ac; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fffffff82bb9784; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fffffffc6bb97ac; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff82bb9784; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc6bb97ac; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x05d0ba0002e8802e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd005e802174023d6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc000c000c000ff81; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0ba00ba00ba00ba0; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0ba00ba00ba011eb; ++ *((unsigned long *)&__m128i_result[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long *)&__m128i_result[0]) = 0xcd1de80217374041; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000fff00000e36; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fef01000e27ca; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x680485c8b304b019; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xc89d7f0fed582019; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_op2[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x387c7e0a133f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_result[0]) = 0xc89d7f0ff90da019; ++ __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01fc020000fe0100; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa23214697fd03f7f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7c7c9c0000007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7c7c9c0000007176; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_op0[0]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_op1[1]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_op1[0]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000080c43b700; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x036caeeca7592703; ++ *((unsigned long *)&__m128i_result[0]) = 0x022002101b200203; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c83e21a22001818; ++ *((unsigned long *)&__m128i_op1[0]) = 0xdd3b8b02563b2d7b; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000009c83e21a; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000022001818; ++ *((unsigned long *)&__m128i_result[1]) = 0xf2c97aaa7d8fa270; ++ *((unsigned long *)&__m128i_result[0]) = 0x0b73e427f7cfcb88; ++ __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0006000200000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505445465593af1; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000780000007800; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf047ef0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80800001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff80800001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff7fff7ef; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffbff8888080a; ++ *((unsigned long *)&__m128i_result[0]) = 0x080803ff807ff7f9; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fc03fc000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f801fe000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01fe01fd01fd01fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x5d7f5d007f6a007f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000500000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000060000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128i_op2[0]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c +new file mode 100644 +index 000000000..839285685 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c +@@ -0,0 +1,383 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7e00fe0000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffbffffff85; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffc0000fdfc; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3941248880000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3941248880000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x76f4248880000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000e36400005253; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000035ed0000e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x400000003fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4000000040000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x400000003fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_op0[0]) = 0x78508ad4ec2ffcde; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_result[0]) = 0x78508ad4ae70fd87; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000440efffff000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x440ef000440ef000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x4400000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000440efffff000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000003b; ++ __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000ffc2f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00201df000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe700000007; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_op2[0]) = 0x011f57c100201a46; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffce00016fb41; ++ *((unsigned long *)&__m128i_result[0]) = 0x57cb857100001a46; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7c7c9c0000007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00ff000000001f1f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7c7c9c0000007176; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc5c53492f25acbf2; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff000000001f1f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_result[0]) = 0xc5c53492f25acbf2; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd73691661e5b68b4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000016f303dff6d2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000016f303dff6d2; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fffffff00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_result[0]) = 0xee297a731e5c5f86; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff008ff820; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe8008fffe7008f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00010001f1153780; ++ __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000021; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op2[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001808281820102; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001808201018081; ++ __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000010100fe0101; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff0200ffff01ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f7f80807f7f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000020302030; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000020302030; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3fffffffc0000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010000fffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = 
__lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080806362; ++ *((unsigned long *)&__m128i_result[0]) = 0x807f808000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000101010015; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffed00010001; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c +new file mode 100644 +index 000000000..bab2c6cf3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c +@@ -0,0 +1,383 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, 
unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xc0c00000c0c00000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xc0c00c01c2cd0009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f78787f00f7f700; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000f7f700f7f700; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000400; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000080003f80ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000080003f80ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff81ff82ff810081; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff82ff810081ff81; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x841f000fc28f801f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x841f000fc28f801f; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x76ecfc8b85ac78db; ++ __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fff3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000000000040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010400; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000002b0995850; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80005613; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007f800000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffff80005613; ++ *((unsigned long *)&__m128i_op2[0]) = 0x007f800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00011cf0c569; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0000002b0995850; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ffffff81fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffff7e01; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000fffe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe00fffe86f901; ++ __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfffbfffbfffbffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffd3000000130000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000f02e1f80f04; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000f02e1f80f04; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x5a57bacbd7e39680; ++ *((unsigned long *)&__m128i_op2[0]) = 0x6bae051ffed76001; ++ *((unsigned long *)&__m128i_result[1]) = 0xf3eb458161080000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffe9454286c0e000; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0051005200510052; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0051005200510052; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe65ecc1be5bc; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe65ecc1be5bc; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff941d; ++ *((unsigned long *)&__m128i_op2[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_op2[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_result[0]) = 0x78508ad4ec2ffcde; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000120000000d; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000cfffffff2; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000dfffffff1; ++ __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff80ffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7ffffffeffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000002fe800000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffe0100000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c +new file mode 100644 +index 000000000..5875aa597 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c +@@ -0,0 +1,372 @@ ++/* { dg-do run 
} */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffff000f0008d3c; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000100f8100002; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff0ff8006f0f950; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007ffd0001400840; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffac0a000000; ++ __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000017fda829; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff8000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000800000000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op2[0]) = 0xd705c77a7025c899; ++ *((unsigned long *)&__m128i_result[1]) = 0xffcb410000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffeb827ffffffff; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffc00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffffc00; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000c5ac01015b; ++ *((unsigned long *)&__m128i_op1[0]) = 0xaaacac88a3a9a96a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ef4002d21fc7001; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x28bf02d1ec6a35b2; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff8000007fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128i_result[0]) = 0x28bf0351ec69b5f2; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001200100012001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xbf80000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1040400000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0961000100000001; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x03574e39e496cbc9; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f77aab500000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3f77aab500000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fbc1df53c1ae3f9; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff820f81; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ff801c9e; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000810000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000700000004e000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000012020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000e00a18f5; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000002023dcdc; ++ *((unsigned long *)&__m128i_result[1]) = 0x000700000004e000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000000012020; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000120000000d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000011ffee; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000dfff2; ++ __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7fffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffff7fffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000003fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ff8010000000001; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x01fc020000fe0100; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x78c00000ff000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) 
= 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1000100012030e02; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x010105017878f8f6; ++ *((unsigned long *)&__m128i_op2[0]) = 0xf8f8fd0180810907; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080800000808; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x328e1080889415a0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3960b1a401811060; ++ *((unsigned long *)&__m128i_op1[1]) = 0x328e1080889415a0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3960b1a401811060; ++ *((unsigned long *)&__m128i_op2[1]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x32f3c7a38f9f4b8b; ++ *((unsigned long *)&__m128i_result[0]) = 0x2c9e5069f5d57780; ++ __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c +new file mode 100644 +index 000000000..4be7fce82 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c +@@ -0,0 +1,438 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include 
++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0d0d0d0d0d0d0d0d; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe31c86e90cda86f7; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100400100200e68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100400100200e68; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010020fffeffde; ++ *((unsigned long *)&__m128i_result[0]) = 0x011f57c100201a46; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffffffffc0800000; ++ __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_result[0]) = 0x003dc288077c7cc1; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffc0000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000004; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000053a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x42a0000042a02001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000036280001; ++ *((unsigned long *)&__m128i_result[0]) = 0x42a0000042a02001; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000ff00fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00ff; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff946c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff946b; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3c992b2e; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff730f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffff946c; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffff946b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff946c; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffdffff946c; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006ffff0004ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002ffff0000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff7f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002fffefffd0001; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6fde000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xe000e0006080b040; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffe000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c6fde000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op1[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a753500950fa306; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0a753500950fa306; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a753500a9fa0d06; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; ++ __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f7f80807f7f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000020302030; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000020302030; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3fffffffc0000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8; ++ __m128i_out = 
__lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010000fffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000008000e2e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080806362; ++ *((unsigned long *)&__m128i_result[0]) = 0x807f808000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8101010181010101; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000101010015; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffed00010001; ++ __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c +new file mode 100644 +index 000000000..8a4c39502 +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c +@@ -0,0 +1,460 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00003fe00ffe3fe0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000b5207f80; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ 
*((unsigned long *)&__m128i_op2[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op2[0]) = 0x2020202020207f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001021; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op2[0]) = 0xc3818bffe7b7a7b8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000467fe000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000467fef81; ++ __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fc0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1e801ffc00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff020000fff4; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fc0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1e801ffc00000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff03ffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00013fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000088500000f6a0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffd00000407; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000442900007b4c; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000e22b0000efa4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff03ffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00013fff; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d37e80682a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320176a4d2; ++ *((unsigned long *)&__m128i_result[0]) = 0x685670d37e80682a; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0411fe800000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x601fbfbeffffffff; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffc105d1aa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbc19ecca; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff3efa; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff43e6; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffa7; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000930400008a10; ++ *((unsigned long *)&__m128i_result[0]) = 0x00006f9100007337; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op2[0]) = 0x001000100010c410; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffff02fff4; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffff02fff4; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7e44bde9b842ff23; ++ *((unsigned long *)&__m128i_result[0]) = 0x00011e80007edff8; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffeffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = 
__lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe0dd268932a5edf9; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe0dd268932a5edf9; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_result[0]) = 0xbddaa86803e33c2a; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0028280000282800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7505853d654185f5; ++ *((unsigned long *)&__m128i_op2[0]) = 0x01010000fefe0101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x012927ffff272800; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffff7f00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff007f0101017f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000020000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000183fffffe5; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000000000002a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff7f00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff007f0101017f; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op2[1]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800000; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000095896a760000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x006f0efe258ca851; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffff7fc8ffff8000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff200000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000015516a768038; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff9ed2e1c000; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2000200000013fa0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000013fa0; ++ *((unsigned long 
*)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080006b00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001b19b1c9c6da5a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x001b19b1c9c6da5a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x008003496dea0c61; ++ __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000001ff000001ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xff80ffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7ffffffeffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000002fe800000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffe0100000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c +new file mode 100644 +index 000000000..b0e22f955 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c +@@ -0,0 +1,317 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f007f007f007f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000003f; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000010000f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000010000f01; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010100000100000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100000101000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010100000100000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100000101000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0040000000ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040000000ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xb327b9363c992b2e; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa1e7b475d925730f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff3c992b2e; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff730f; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000001ff; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000003d0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000003d0000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007001400000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000053a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000700140000053a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004001000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80c400000148; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff80c1ffffe8de; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000148; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034; ++ __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000b3a6000067da; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00004e420000c26a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5779108fdedda7e4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000b3a6000067da; ++ *((unsigned long *)&__m128i_result[0]) = 0x5779108f0000c26a; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op1[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_result[0]) = 0x020310edc003023d; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe03fe01fe01fe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe3bfa3ffe3bfb21; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001d001d001d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_result[0]) = 0x001d001d001d0000; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000155; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000155; ++ __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000051649b6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd2f005e44bb43416; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003e0000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000051649b6; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000003e0000003f; ++ __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00012c8a0000a58a; ++ __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c +new file mode 100644 +index 000000000..51a9a92e8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c +@@ -0,0 +1,362 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000007f0000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003a0000003a; ++ *((unsigned long *)&__m128i_result[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_result[0]) = 0x77c03fd640003fc6; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbafebb00ffd500fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5b5b5b5aa4a4a4a6; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x5b5b5b5aadadadad; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe03fe3ffe01fa21; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000007500; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007e1600007d98; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000fe00fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f50000fe75fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe7efe00fe7dfe; ++ __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2002040404010420; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101010180800101; ++ *((unsigned long *)&__m128i_result[1]) = 0x2002040404010420; ++ *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c80800101; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff84fff4ff84fff4; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffdf; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf001f0010101f002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x685670d27e00682a; ++ *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_result[0]) = 0x685670d27e00682a; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8145f50; ++ __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80ff0010ff06; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f01000eff0a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff80ff0010ff06; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ff0000000007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff0000000ad3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff000fffff000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff00010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff000fffff000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x479f64b03373df61; ++ *((unsigned long *)&__m128i_result[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long *)&__m128i_result[0]) = 0x479f64b03373df61; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_result[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_result[0]) = 0x52525252adadadad; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080700000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c +new file mode 100644 +index 000000000..7cff1d848 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c +@@ -0,0 +1,279 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003be14000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003bfb4000; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007ffffffb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x010101017f010101; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0b0b0b0b0b0b0b0b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0b0b0b0b0b0b0b0b; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000c; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0606060606060606; ++ *((unsigned long *)&__m128i_result[0]) = 0x0606060606060606; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_b (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000001fc00000000; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000fff; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000a1ff4c; ++ *((unsigned long *)&__m128i_result[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000300a10003; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000b000b000b; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vmaxi_h (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020100fedd0c00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000004; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000050000007b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000500000005; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x001fffff001fffff; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000b0000000b; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000900000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000900000009; ++ __m128i_out = __lsx_vmaxi_w 
(__m128i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000600000006; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000600000006; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f80000000000007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000700000007; ++ __m128i_out = __lsx_vmaxi_w (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000007f00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000a; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e880ffffffff; ++ __m128i_out = __lsx_vmaxi_d (__m128i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c +new file mode 100644 +index 000000000..b79af2228 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c +@@ -0,0 +1,223 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, 
unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_result[0]) = 0x0303030303030303; ++ __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111; ++ __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111; ++ __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0a0a0a0a0a; ++ __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0011001100110011; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005000500050005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005000500050005; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x001d001d20000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x001d001d20000020; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003fff00010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00123fff00120012; ++ *((unsigned long *)&__m128i_result[0]) = 0x0012001200120012; ++ __m128i_out = 
__lsx_vmaxi_hu (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a001a; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a001a; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_result[0]) = 0x001e001e001e001e; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_result[0]) = 0x001d001d001d001d; ++ __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001600000016; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001600000016; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f5533a694f902c0; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x37c0001000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x37c0001000000001; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbf8000000000ffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xcf00000000000000; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000011; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001c; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_result[0]) = 0x43d3e0000013e000; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001d; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001b; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c +new file mode 100644 +index 000000000..b2a7a35bd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c +@@ -0,0 +1,434 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff000000ff00; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff91fffffff5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff00650001ffb0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000067400002685; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ff91fffffff5; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00650000ff85; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000a0; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000008680f1ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ffffff80ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80ffff8680f1ff; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0e440; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe4ffffffe4ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe4fffff0e4ff; ++ __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000a16316b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000063636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000a1630000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc0ff81000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000600000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc0ff81000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fdffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80ffffffffff02; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x027e0000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80ffffffffff02; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffe0000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc090380000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc090380000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xc090380000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8493941335f5cc0c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x625a7312befcb21e; ++ *((unsigned long *)&__m128i_result[1]) = 0x8493941300000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000002befcb21e; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000078c00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000003000000d613; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c0000000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000200000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff800000ff800000; ++ __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000210011084; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000017f0a82; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3e25c8317394dae6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xcda585aebbb2836a; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_result[0]) = 0x377b810912c0e000; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcfcfcfcfcfd; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_result[0]) = 0xf9796558e39953fd; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c +new file mode 100644 +index 000000000..c90cae75e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000300000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe0004fffe0004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_result[0]) = 0xf9796558e39953fd; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8145f50; ++ __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c00000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f417f417f027e03; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020207e03; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00008d3200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x479f64b03373df61; ++ *((unsigned long *)&__m128i_result[1]) = 0x00008d3200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a09080709080706; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xa87745dbd93e4ea1; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0xaa49601e26d39860; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_result[1]) = 0x2006454652525252; ++ *((unsigned long *)&__m128i_result[0]) = 0x2006454652525252; ++ __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff2382; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0505050505050505; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000005050000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0028280000282800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000282800; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fd13fc02fe0c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd14fe01fd16; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfc01fd1300000001; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xfe00fd1400010000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2000200020002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2000200020002000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9611c3985b3159f5; ++ *((unsigned long *)&__m128i_result[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_result[0]) = 0x9611c3985b3159f5; ++ __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000de0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000006f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff007fff810001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff7f810100001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001fffc0ffffe001; ++ *((unsigned long *)&__m128i_result[1]) = 0xff7f810100001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000400530050ffa6; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007efe7f7f8000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000b81c8382; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000077af9450; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000077af9450; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c +new file mode 100644 +index 000000000..772d040c3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c +@@ -0,0 +1,314 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, 
__m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffffffc; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00002f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958aefff895e; ++ *((unsigned long *)&__m128i_result[1]) = 0xfafafafafafafafa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfafa958aeffa89fa; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_result[1]) = 0xfbfbfbfbadadadad; ++ *((unsigned long *)&__m128i_result[0]) = 0xfbfbfbfbadadadad; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f1f1f1f1f1; ++ *((unsigned long *)&__m128i_result[0]) = 0xf1f1f1f1f1f1f1f1; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000007500; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007e1600007d98; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000090900000998; ++ __m128i_out = __lsx_vmini_b (__m128i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf1f181a2f1f1f1b0; ++ *((unsigned long *)&__m128i_result[0]) = 0xf1f1f1f1f180f1f1; ++ 
__m128i_out = __lsx_vmini_b (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff6fff6fff6fff6; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1716151416151413; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1514131214131211; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff3fff3fff3fff3; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff3fff3fff3fff3; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff4fffffff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff4fffffff4; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff3fffffff3; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xfffffff3fffffff3; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffefffef; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01fe0400000006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000500000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01fe0400000005; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffafffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffafffffffa; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000d0000000d; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x345002920f3017d6; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff7fffffff7; ++ __m128i_out = __lsx_vmini_w (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100100000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff1; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff1; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000006; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000006; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00a6ffceffb60052; ++ *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff9; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff9; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111100; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x55aa55c3d5aa55c4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa55556fd5aaaac1; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000c; ++ *((unsigned long *)&__m128i_result[0]) = 0xaa55556fd5aaaac1; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff4; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffb; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffb; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcdcfcfcfcdc; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000085af0000b000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00017ea200002000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff4; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01e41ffff0ffff; ++ __m128i_out = __lsx_vmini_d (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c +new file mode 100644 +index 000000000..6eaae2134 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c +@@ -0,0 +1,216 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0001ffff0001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000a163000016b0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303000103030001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000030300000303; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0d0d0d0d0d0d0d0d; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7da9b23a624082fd; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0505050505050505; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000005050000; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000001fffdfffdff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001fffdfffdff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010101010101; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000009c007c00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000071007600; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000009000900; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_result[0]) = 0x0303030303030303; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3220000d3f20000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8bff0000a7b80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0909000009090000; ++ __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000000b57ec564; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000083ff0be0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0014000000140014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0014000000140014; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0013001300130013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0013001300130013; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040004000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000007f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000003fc00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fe01fe00; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000a; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000d3460001518a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000084300000e55f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000007; ++ __m128i_out = __lsx_vmini_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c +new file mode 100644 +index 000000000..5470d40dd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c +@@ -0,0 +1,254 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x82c539ffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc72df14afbfafdf9; ++ *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff994cb09c; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc3639d96; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c844; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c844; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001808281820102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001808201018081; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001008281820102; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001008201010081; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010240010202; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0804080407040804; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfeca2eb9931; ++ *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[0]) = 0x370bdfeca2eb9931; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x805ffffe01001fe0; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x9a49e11102834d70; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8144ffff01c820a4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9b2ee1a4034b4e34; ++ *((unsigned long *)&__m128i_result[1]) = 0xff1affff01001fe0; ++ *((unsigned long *)&__m128i_result[0]) = 0xff1aff6d02834d70; ++ __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001d001d001d0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001d001d001d001d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001d001d001d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff000000ff00; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000000307d0771; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0d8e36706ac02b9b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x80000000307d0771; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0d8e36706ac02b9b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x413e276583869d79; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f017f9d8726d3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000011ffee; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000dfff2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c +new file mode 100644 +index 000000000..8deb04427 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c +@@ -0,0 +1,254 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x16161616a16316b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x16161616a16316b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001494b494a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001494b494a; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f801fa06451ef11; ++ *((unsigned long *)&__m128i_op1[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000022666621; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffdd9999da; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f0000fd7f0000fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000066621; ++ *((unsigned long *)&__m128i_result[0]) = 0x01ff00085e9900ab; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xefffdffff0009d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x478b478b38031779; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6b769e690fa1e119; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2006454690d3de87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff100000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000f000000000000; ++ __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe0000fffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0000fffe; ++ __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000101fd01fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80ff80ff80ff80; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80ff8080008000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000101fd01fe; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff2cfed4fea8ff44; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffeffff0035ff8f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000a0; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003c853c843c844; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003c853c843c844; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003ddc5dac; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffefffff784; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f8000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001000010f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000011ff8bc; ++ __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c +new file mode 100644 +index 000000000..64a950f81 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c +@@ -0,0 +1,119 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000cb4a; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000033; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe00006aea; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffce; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskgez_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c +new file mode 100644 +index 000000000..8f743ec2e +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c +@@ -0,0 +1,321 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd83c8081ffff8080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000013d; ++ __m128i_out = __lsx_vmskltz_b 
(__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000f0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000100010001fffd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001007c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9780697084f07dd7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x87e3285243051cf3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000cdc1; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x05d0ae6002e8748e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcd1de80217374041; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000065a0; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000004b01; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = 
__lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff08ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003f3f; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000022; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; 
++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000008080600; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0018; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000035697d4e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000013ecaadf2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000006de1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5f9ccf33cf600000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003ffffe00800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034; ++ __m128i_out = __lsx_vmskltz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc3818bffe7b7a7b8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000027; ++ __m128i_out = __lsx_vmskltz_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmskltz_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c +new file mode 100644 +index 000000000..d547af0d3 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c +@@ -0,0 +1,104 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001e1f; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b 
(__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x009500b10113009c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x009500b10113009c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000005d5d; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffff000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000fe; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000007f41; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0014001400140000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000554; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x202544f490f2de35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x202544f490f2de35; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ff8; ++ __m128i_out = __lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = 
__lsx_vmsknz_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c +new file mode 100644 +index 000000000..47cf33cfd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c +@@ -0,0 +1,461 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffff0002; ++ *((unsigned long *)&__m128i_op2[1]) = 0x54beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op2[0]) = 0x8024d8f6a494afcb; ++ *((unsigned long *)&__m128i_result[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_result[0]) = 0x0024d8f6a494006a; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001ffff; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, 
__m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffffff0ffe04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001fc0000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x040004000400040d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x040004000400040d; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xb327b9363c99d32e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa1e7b475d925730f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003f80b0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_op2[0]) 
= 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_result[1]) = 0xb327b9363c992b2e; ++ *((unsigned long *)&__m128i_result[0]) = 0xa1e7b475d925730f; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000004c7f4c7f; ++ *((unsigned long *)&__m128i_op2[0]) = 0xe0c0c0c0d1c7d1c6; ++ *((unsigned long *)&__m128i_result[1]) = 0x061006100613030c; ++ *((unsigned long *)&__m128i_result[0]) = 0x4d6814ef9c77ce46; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000f00; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3727f00000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc7e01fcfe0000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3727112c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x39201f7120000040; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xe5b9012c00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc7e01fcfe0000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0204; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000442900007b4c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000e22b0000efa4; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000442800007b50; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0204; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op2[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000029; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0000007f800000; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0fff0fff0fff0fff; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f07f0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff177fffff0fc; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffbfffefffc9510; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffbfffefffc9510; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfffbfffefffc9510; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfffbfffefffc9510; ++ *((unsigned long *)&__m128i_result[1]) = 0x29c251319c3a5c90; ++ *((unsigned long *)&__m128i_result[0]) = 0x62fb9272df7da6b0; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8f8f8f8f8f8f8f8f; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x800000007fffffff; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000053a4f452; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001400000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400000000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff913bfffffffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff913bfffffffd; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_result[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff913bb9951901; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0021b761002c593c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x002584710016cc56; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000001e03; ++ *((unsigned long *)&__m128i_result[1]) = 0x0021b761002c593c; ++ *((unsigned long *)&__m128i_result[0]) = 0x002584710016ea59; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000290; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000290; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0002000400000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000500000001; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op2[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_result[1]) = 0xfd200ed2fd370775; ++ *((unsigned long *)&__m128i_result[0]) = 0x96198318780e32c5; ++ __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_op2[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c +new file mode 100644 +index 000000000..ab650a024 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c +@@ -0,0 +1,353 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x059a35ef139a8e00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0c00000c0c00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0c00c01c2cd0009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0fffff000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffe00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff0000ac26; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ffffff81fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff00ffff7e01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000fe86; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff8000010f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fff80000; ++ __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf3efff536d5169b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ebdfffffddf3f40; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f5ec0a0feefa0b0; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fffffff3ffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fffffff3ffffffe; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001f7fc100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x001f7fff00000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000cd630000cd63; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffcd63ffffcd63; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffd765ffffd765; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000015516a768038; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff9ed2e1c000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007ffd0001400840; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003ffd000a4000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000009c400000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0202fe02fd020102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000202fe02; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006362ffff; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffe0002; ++ __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c +new file mode 100644 +index 000000000..60b6e3503 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c +@@ -0,0 +1,372 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, 
unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000c5ac01015b; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaaacac88a3a9a96a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000038003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000040033; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000068; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f881a20ffd02b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0ff780a10efc01af; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fe7f0000; ++ __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000efffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001001100110068; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1d8000001d800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1d8000001d800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1d8000001d800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1d8000001d800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0366000003660000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0366000003660000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128i_op0[0]) = 0x28bf0351ec69b5f2; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ef3ddac21fc5a2c; ++ *((unsigned long *)&__m128i_result[0]) = 0x28bee9edec690869; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000214f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc31b63d846ebc810; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff0000800000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff941d; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000010a7; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000046ebaa2c; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000cf4f4f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000cf4f4f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000005f0003e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000003397dd140; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000004bd7cdd20; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0016ffb00016ffb0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0016ffb00016ffb0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000004a294b; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000006d04bc; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007ffe7ffe400000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007ffd0001400840; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffa800000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000157; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001f2f2cab1c732a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1baf8eabd26bc629; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1c2640b9a8e9fb49; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002dab8746acf8e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00036dd1c5c15856; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003a7fc58074ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000eeff1100e; ++ __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c +new file mode 100644 +index 000000000..8ba666275 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c +@@ -0,0 +1,282 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x54feed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8064d8f6a494afcb; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffe003c1f0077; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff0074230438; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000000438; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000800800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000004000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff5fff4002ffff5; ++ *((unsigned long *)&__m128i_op1[1]) = 0xaa858644fb8b3d49; ++ *((unsigned long *)&__m128i_op1[0]) = 0x18499e2cee2cc251; ++ *((unsigned long *)&__m128i_result[1]) = 0x8644000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xaed495f03343a685; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505443065413aed; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0xb71289fdfbea3f69; ++ *((unsigned long *)&__m128i_result[0]) = 0x4e17c2ffb4851a40; ++ __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_result[1]) = 0xc72ef153fc02fdf7; ++ *((unsigned long *)&__m128i_result[0]) = 0xca31bf15fd010000; ++ __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc000c000c000ff81; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xacc8c794af2caf01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa91e2048938c40f0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xeeb1e4f43c3763f3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff5a6fe3d7; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000021e79364; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000718ea657431b; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000006ca193ec; ++ *((unsigned long *)&__m128i_result[0]) = 0x00008e72b5b94cad; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x467f6080467d607f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007f008000ea007f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffe0004fffe0004; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc1bdceee242070db; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe8c7b756d76aa478; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f433212dce09025; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_result[0]) = 0x9611c3985b3159f5; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x9727b8499727b849; ++ *((unsigned long *)&__m128i_result[0]) = 0x12755900b653f081; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0303030303030303; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x02f3030303030303; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x06d9090909090909; ++ __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff81ffff7f03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04ffff8101ff81ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0000001e000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a000000f6000000; ++ __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x317fce80317fce80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c +new file mode 100644 +index 000000000..8357f4e80 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c +@@ -0,0 +1,434 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001f7fc100000404; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000002a000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe1ffc100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000400000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000009000900; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000009000900; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffc3; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff9dff9dff9dff9d; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe50000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffe020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fc00000010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001b0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff81007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffb7005f0070007c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000007c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000005f0003e000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffbfc0ffffbfc0; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffff0100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff0100000001; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x478b478b38031779; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6b769e690fa1e119; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fe98c2a0; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff9411; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000100000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x37b951002d81a921; ++ __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000e0000000e0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000c400; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000001; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffff98dea; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000f80007; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000006c80031; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004280808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010203030201000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000808080800; ++ __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2000000020000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200200000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x6a57a30ff0000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff700000009; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8001000180010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8001000184000800; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff80007e028401; ++ *((unsigned long *)&__m128i_result[0]) = 0x9a10144000400000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000bd003d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000077af9450; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000047404f4f040d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000214f; ++ *((unsigned long *)&__m128i_result[0]) = 0xc31b63d846ebc810; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c +new file mode 100644 +index 000000000..e4afc8247 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe01fe01fe01fe01; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fe01fe01; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long *)&__m128i_op0[0]) = 0x17483c07141b5971; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd4bade5e2e902836; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x345002920f3017d6; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000c0010000a186; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00067fff0002a207; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0002; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_result[0]) = 0x05fafe0101fe000e; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc1f03e1042208410; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00f0001000000010; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a80613fda5dcb4a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x93f0b81a914c003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000051649b6; ++ *((unsigned long *)&__m128i_result[0]) = 0xd2f005e44bb43416; ++ __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000001fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001fffe00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff000f0008d3c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff000f0008d3c; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0xe10000004deb2610; ++ *((unsigned long *)&__m128i_result[0]) = 0xe101e0014dec4089; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_op1[0]) = 0x11111131111116a6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2028000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001001100110068; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd400c02000002acf; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf4000020c4000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x6453f5e01d6e5000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fdec000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op1[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000021100000211; ++ *((unsigned long *)&__m128i_result[0]) = 0xfb141d31fb141d31; ++ __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f800000976801fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x837c1ae57f8012ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x976801fd6897fe02; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8012ec807fed13; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000100010001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0909090900000909; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0909090909090909; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a80613fda5dcb4a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x93f0b81a914c003b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1e242e4d68dc0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2ff8fddb7ae20000; ++ __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffe8081000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c +new file mode 100644 +index 000000000..346f0316a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c +@@ -0,0 +1,245 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ff01ff01ff01; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01ff01ff01fc10; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffbeffc2ffbeffd1; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80000000fff80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000004000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff8004000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff8607db959f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000008a0000008a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000008900000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000043c5ea7b6; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000008fc4ef7b4; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff46; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffe00000002; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff46000000ba; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8f8372f752402ee; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80044def00000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007f8449a19084; ++ *((unsigned long *)&__m128i_result[0]) = 0x49a210000000ff00; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfd000000fb00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fe00f8000700; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xfdfef9ff0efff900; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x7afafaf88a050a05; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_result[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_result[0]) = 0x5779108fdedda7e4; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0xd48acbfe13102acf; ++ *((unsigned long *)&__m128i_result[0]) = 0xf4af70d0c4000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000056; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff86; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_result[0]) = 0xf8e1a03affffe3e2; ++ __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c +new file mode 100644 +index 000000000..6eea49a61 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c +@@ -0,0 +1,272 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0100010000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0100010000010000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x387c7e0a133f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0; ++ __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefe000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000155; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ff0010000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ff0010000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x440ef000440ef000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4400000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0f8d33000f8d3300; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003b80000000000; ++ __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0018001800180018; ++ *((unsigned long *)&__m128i_op1[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd83c8081ffff808f; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc45a851c40c18; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3e1f321529232736; ++ *((unsigned long *)&__m128i_op1[0]) = 0x161d0c373c200826; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003f8000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff000000007fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6fde000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fef01000f27ca; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000010000010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000ffef0010000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000044525043c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffc03fc040; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7f00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000400028000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc110000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc00d060000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf047ef0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff100000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c +new file mode 100644 +index 000000000..f3e4e0390 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c +@@ -0,0 +1,282 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x004e005500060031; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff870068fff5ffb3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfa31dfa21672e711; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1304db85e468073a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000150000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffeffff001effff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffff1a0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000f00f; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4); ++ *((unsigned long *)&__m128i_op0[1]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000d82; ++ *((unsigned long *)&__m128i_result[0]) = 0x046a09ec009c0000; ++ __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f3f018000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf0fd800080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000a00028004000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000005a00000228; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff9ee000004ec; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f54e0ab00000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00e4880080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080810080808100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff011fb11181d8ea; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80ff800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe000200fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe000200fe; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x00fd02fe00002302; ++ *((unsigned long *)&__m128i_result[0]) = 0x007ffd0200000000; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffe0001fffe0001; ++ __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d27e00682a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x14ccc631eb3339ce; ++ *((unsigned long *)&__m128i_result[0]) = 0x685670d197a98f2e; ++ __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000ffff000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000077529b522400; ++ __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000111111312; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x2222272111111410; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff800000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0015172b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffffff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000600000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000c6c7; ++ *((unsigned long *)&__m128i_result[0]) = 0x8d8d8d8d8d8cc6c6; ++ __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a0000000a000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f7f00007f7f7500; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3b42017f3a7f7f01; ++ *((unsigned long *)&__m128i_result[1]) = 0x04faf60009f5f092; ++ *((unsigned long *)&__m128i_result[0]) = 0x04fafa9200000000; ++ __m128i_out = __lsx_vmulwod_q_du 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c +new file mode 100644 +index 000000000..9f5702e2c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c +@@ -0,0 +1,308 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff020000fff4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001ee100000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f5ec0a0feefa0b0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff02d060; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x80000000fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000004a294b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000006d04bc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ef4002d21fc7001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x28bf02d1ec6a35b2; ++ *((unsigned long *)&__m128i_result[1]) = 0x2a7b7c9260f90ee2; ++ *((unsigned long *)&__m128i_result[0]) = 0x1b1c6cdfd57f5736; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000004040504; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000004040504; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000feff01; ++ *((unsigned long *)&__m128i_result[0]) = 0x00feff0100000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010202050120; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010102020202; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0ae3072529fbfe78; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x030804010d090107; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_result[1]) = 0x0039d21e3229d4e8; ++ *((unsigned long *)&__m128i_result[0]) = 0x6d339b4f3b439885; ++ __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c +new file mode 100644 +index 000000000..9441ba50e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c +@@ -0,0 +1,321 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, 
i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffeffffffff; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffffffc; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff01; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000fff3; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x00ff0001ffffff0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100ff010101f6; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100010000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffffeff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffbff8888080a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x080803ff807ff7f9; ++ *((unsigned long *)&__m128i_result[1]) = 0x010105017878f8f6; ++ *((unsigned long *)&__m128i_result[0]) = 0xf8f8fd0180810907; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffdffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffeffff; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xbbe5560400010001; ++ *((unsigned long *)&__m128i_result[0]) = 0xe7e5dabf00010001; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000060a3db; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa70594c000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ff9f5c25; ++ *((unsigned long *)&__m128i_result[0]) = 0x58fa6b4000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vneg_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000008000001e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff7fffffe2; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_result[0]) = 0x377b810912c0e000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffc00001ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x003ffffe00800000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vneg_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x087c000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000087c; ++ *((unsigned long *)&__m128i_result[1]) = 0xf784000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffff784; ++ __m128i_out = __lsx_vneg_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vneg_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c +new file mode 100644 +index 000000000..a7a3acce9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c +@@ -0,0 +1,109 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00070007; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff0007ffff; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& 
__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xce23d33e43d9736c; ++ *((unsigned long*)& __m128i_op0[0]) = 0x63b2ac27aa076aeb; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x31dc2cc1bc268c93; ++ *((unsigned long*)& __m128i_result[0]) = 0x9c4d53d855f89514; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff3; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e04; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000400080003fff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000bc2000007e04; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffbfff7fffc000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff43dfffff81fb; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35; ++ *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49; ++ *((unsigned long*)& __m128i_op1[1]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_op1[0]) = 0x5252525252525252; ++ *((unsigned long*)& __m128i_result[1]) = 0xada4808924882588; ++ *((unsigned long*)& __m128i_result[0]) = 0xacad25090caca5a4; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe0000ff18; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c +new file mode 100644 +index 000000000..a07a02ab2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c +@@ -0,0 +1,91 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000; ++ *((unsigned long*)& __m128i_result[1]) = 0xcccccccc0000cccc; ++ *((unsigned long*)& __m128i_result[0]) = 0xcccccccc0000cccc; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x33); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xa6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; ++ *((unsigned long*)& __m128i_result[1]) = 0x9292929292929292; ++ *((unsigned long*)& __m128i_result[0]) = 0x8090808280909002; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x6d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3838383838300010; ++ *((unsigned long*)& __m128i_result[0]) = 0x3818200838383838; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xc7); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f; ++ *((unsigned long*)& __m128i_result[1]) = 0x5d5d5d5d5d5d5d5d; ++ *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d0000; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xa2); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x7f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long*)& __m128i_result[1]) = 0x1313131313131313; ++ *((unsigned long*)& __m128i_result[0]) = 0x1313131313131313; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0xec); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x9d9d9d9d9d9d9d9d; ++ *((unsigned long*)& __m128i_result[0]) = 0x9d9d9d9d9d9d9d9d; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x62); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00f525682ffd27f2; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x00365c60317ff930; ++ *((unsigned long*)& __m128i_result[1]) = 0xe500c085c000c005; ++ *((unsigned long*)& __m128i_result[0]) = 0xe5c1a185c48004c5; ++ __m128i_out = __lsx_vnori_b(__m128i_op0,0x1a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c +new file mode 100644 +index 000000000..537a1bb3b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c +@@ -0,0 +1,169 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x7e44bde9b842ff23; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00011e80007edff8; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffc001fffffffff; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x3e035e51522f0799; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x3e035e51522f0799; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xfff8000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613; ++ *((unsigned long*)& __m128i_op0[0]) = 0x81000080806b000b; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569; ++ *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff9cf0d77b; ++ *((unsigned long*)& __m128i_result[0]) = 0xc1000082b0fb585b; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffbfffb; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, 
__m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x0001ffff0101ffff; ++ *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffc105d1aa; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbc19ecca; ++ *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffffff9bffbfb; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffdffdfb; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c +new file mode 100644 +index 000000000..8a6e035c9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c +@@ -0,0 +1,123 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8282828282828282; ++ *((unsigned long*)& __m128i_result[0]) = 0x8282828282828282; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x82); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x7505853d654185f5; ++ *((unsigned long*)& __m128i_op0[0]) = 0x01010000fefe0101; ++ *((unsigned long*)& __m128i_result[1]) = 0x7545c57d6541c5f5; ++ *((unsigned long*)& __m128i_result[0]) = 0x41414040fefe4141; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x40); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long*)& __m128i_result[1]) = 0x7474f6fd7474fefe; ++ *((unsigned long*)& __m128i_result[0]) = 0xf474f6fef474f6fe; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x74); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); 
++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3d3d3d3d3d3d3d3d; ++ *((unsigned long*)& __m128i_result[0]) = 0x3d3d3d3d3d3d3d3d; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x3d); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long*)& __m128i_result[1]) = 0xfffffadffedbfefe; ++ *((unsigned long*)& __m128i_result[0]) = 0x5f5f7bfedefb5ada; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x5a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x38); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0d1202e19235e2bc; ++ *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1; ++ *((unsigned long*)& __m128i_result[1]) = 0x2f3626e7b637e6be; ++ *((unsigned long*)& __m128i_result[0]) = 0xee3ee6f77f6e76f7; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x26); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long*)& __m128i_result[1]) = 0xd6d7ded7ded7defe; ++ *((unsigned long*)& __m128i_result[0]) = 0xd6d7ded7ded7defe; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0xd6); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0xfffe0000fffe0000; ++ *((unsigned long*)& __m128i_result[1]) = 0x7777777777777777; ++ *((unsigned long*)& __m128i_result[0]) = 0xffff7777ffff7777; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x77); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x55); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xd454545454545454; ++ *((unsigned long*)& __m128i_result[0]) = 0xd454545454545454; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x54); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x4f); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long*)& __m128i_result[0]) = 0x8a8a8a8a8a8a8a8a; ++ __m128i_out = __lsx_vori_b(__m128i_op0,0x8a); ++ 
ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c +new file mode 100644 +index 000000000..bb59bc312 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c +@@ -0,0 +1,109 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x00d3012b015700bb; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00010000ffca0070; ++ *((unsigned long*)& __m128i_result[1]) = 0xff2cfed4fea8ff44; ++ *((unsigned long*)& __m128i_result[0]) = 0xfffeffff0035ff8f; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe0045; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0x00fe00fe00fe00fe; ++ *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe0045; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_op0[0]) = 0xe4423f7b769f8ffe; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7; ++ *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe1ffc0; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffff009ff83f; ++ __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c +new file mode 100644 +index 000000000..030e87fd8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c +@@ -0,0 +1,452 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf4b6f3f52f4ef4a8; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x00f900d7003d00e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x003e00d100de002b; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbdf077eee7e20468; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe3b1cc6953e7db29; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000e7e20468; ++ *((unsigned long *)&__m128i_result[0]) = 0xc2fac2fa53e7db29; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf8f8e018f8f8e810; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf8f8f008f8f8f800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000f0080000f800; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1211100f11100f0e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x100f0e0d0f0e0d0c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; ++ 
__m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ffe7ffe7ffe7ffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007ffe00007ffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001c00ffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00007f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x000001000f00fe00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000017fff00fe7f; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000016fff9d3d; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff000f0008d3c; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff0016fff8d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000003c3c; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff0101ffff3d3d; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000958affff995d; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffefffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002fffefffd0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0202fe02fd020102; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5a6f5c53ebed3faa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa36aca4435b8b8e1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5a6f5c53ebed3faa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa36aca4435b8b8e1; ++ *((unsigned long *)&__m128i_result[1]) = 0x5c535c533faa3faa; ++ *((unsigned long *)&__m128i_result[0]) = 0xca44ca44b8e1b8e1; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_result[1]) = 0x04c0044a0400043a; ++ *((unsigned long *)&__m128i_result[0]) = 0x04c004d6040004c6; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000006362ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d0000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000dffff000d; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xc000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[1]) = 0x2080208020802080; ++ *((unsigned long *)&__m128i_result[0]) = 0x2080208020802080; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000001b0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001b0000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000053a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff9000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xfffc000400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffc000400000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001f00000000; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080000080800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x9380c4009380c400; ++ __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc2007aff230027; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080005eff600001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01017f3c00000148; ++ *((unsigned long *)&__m128i_op1[0]) = 0x117d7f7b093d187f; ++ *((unsigned long *)&__m128i_result[1]) = 0xff23002700000148; ++ *((unsigned long *)&__m128i_result[0]) = 0xff600001093d187f; ++ __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002711250a27112; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00d2701294027112; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff7112ffff7112; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff7012ffff7112; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_op1[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op1[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[1]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[0]) = 0x020310d0c0030220; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000efffefff; ++ *((unsigned long *)&__m128i_result[0]) = 0xa03aa03ae3e2e3e2; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8140001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000010f8000081a2; ++ *((unsigned long *)&__m128i_result[0]) = 0x000069bb00000001; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c +new file mode 100644 +index 000000000..783eedae1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c +@@ -0,0 +1,461 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000020100; ++ __m128i_out = 
__lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffc00000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x03574e3a03574e3a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000015; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op1[1]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe0404041c0404040; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_result[0]) = 0x803f800080000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe80ff80ffff0000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x11000f2000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f000d2000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000c000ffffc000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000006f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000c00000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2222272011111410; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x2222272011111410; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffef8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffdfffdfffdffee0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffdfffdf; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010100000100000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100000101000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000000010; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128i_result[1]) = 0x21011f3f193d173b; ++ *((unsigned long *)&__m128i_result[0]) = 0xff39ff37ff35ff33; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80806362; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00008080; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404050404040404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404050404040404; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000004040504; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000004040504; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000807f80808000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80006b0000000b00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000807f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80006b0080808080; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000400000004000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00004000ffffffff; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffefffe00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffe00000000; ++ __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x00cd006300cd0063; ++ *((unsigned long *)&__m128i_result[0]) = 0x00cd006300cd0063; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03aa558e1d37b5a1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff80fd820000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000084d12ce; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x002e0059003b0000; ++ __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffaeffaeffaeffae; ++ *((unsigned long *)&__m128i_result[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_result[0]) = 0x001effae001effae; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000440efffff000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000440efffff000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff2356fe165486; ++ __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000cecd00004657; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000c90000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00019d9a00008cae; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c +new file mode 100644 +index 000000000..66982d89f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c +@@ -0,0 +1,350 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000003c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0701000007010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0701000000000000; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x807f7f8000ffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00feff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0107070100080800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080800070800; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0303030303030303; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op0[0]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000009; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000010000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0ba00ba00ba00ba0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0ba00ba00ba011eb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000a0000000a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000a0000000d; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfbfbfb17fbfb38ea; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000029; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffbfc0ffffbfc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000032; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000900050007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0800080008000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe160065422d476da; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000d00000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000b00000010; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010100000101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0103000201030002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000200000001e; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x000000200000001e; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbbe5560400010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe7e5dabf00010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000b000500010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000c00010001; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001f0000001f; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_h (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000600007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000008ffffa209; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000467fef81; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000013; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_d (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000fe03fe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fe01fe01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000007020701; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000007010701; ++ __m128i_out = __lsx_vpcnt_b (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000120000000d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e; ++ __m128i_out = __lsx_vpcnt_w (__m128i_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c +new file mode 100644 +index 000000000..58591f1bb +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c +@@ -0,0 +1,362 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xc2409edab019323f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x460f3b393ef4be3a; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004007c00fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fc0000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffefefefe; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4811fda96793b23a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8f10624016be82fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xfda9b23a624082fd; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfd293eab528e7ebe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffeb48e03eab7ebe; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[0]) = 
0xfffff00010000fff; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000200000013fa0; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000013fa0; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000f7d1000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x773324887fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000017161515; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000095141311; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000017fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x1716151595141311; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ef400ad21fc7081; ++ *((unsigned long *)&__m128i_op1[0]) = 0x28bf0351ec69b5f2; ++ *((unsigned long *)&__m128i_result[1]) = 0xdfa6e0c6d46cdc13; ++ *((unsigned long *)&__m128i_result[0]) = 0x21fc7081ec69b5f2; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04c0044a0400043a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04c004d6040004c6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x044a043a04d604c6; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001b4a00007808; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc03fc000000004; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000103030102ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010102ffff; ++ __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x3b5eae24ab7e3848; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000009c83e21a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000022001818; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000e21a00001818; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; ++ __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c +new file mode 100644 +index 000000000..74269e319 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c +@@ -0,0 +1,336 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000001; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf436f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff51cf8da; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffd6040188; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000101fffff8b68; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000b6fffff8095; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff51cffffd604; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xa); ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff0cffffff18; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefffefffeff6a0c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x11000f200f000d20; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000805; ++ *((unsigned long *)&__m128i_op0[0]) = 0x978d95ac768d8784; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000408; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff91fffffff5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff00650001ffb0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffff0001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ca02f854; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ca0200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ca0200000000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000c6c6ee22; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6c62e8a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000c6c6ee22; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c62e8a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0006000000040000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363636363636363; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fc03fc000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f1fd800000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f1f00003f3f0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f3f00007f1f0000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff9f017f1fa0b199; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1197817fd839ea3e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000033; ++ *((unsigned long *)&__m128i_result[1]) = 0xff011fb11181d8ea; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080808000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_result[1]) = 0x67ebb2ebc884ed3f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ddc; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c +new file mode 100644 +index 000000000..acca2bee9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c +@@ -0,0 +1,488 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x4); ++ int_result = 0x0000000000000000; ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, 
unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000463fd2902d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5ccd54bbfcac806c; ++ unsigned_int_result = 0x00000000000000ac; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd705c77a7025c899; ++ unsigned_int_result = 0x000000000000edfa; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0400040004000400; ++ unsigned_int_result = 0x0000000000000400; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007d3ac600; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x7); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dffbfff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0200400000000001; ++ unsigned_int_result = 0x0000000000000001; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffffff; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff; ++ long_int_result = 0x00000001ffffffff; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00a6ffceffb60052; ++ unsigned_int_result = 0x0000000000000084; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xa); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xc); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0xffffffffffffffff; ++ unsigned_int_result = 0x00000000ffffffff; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ long_int_result = 0xffffffffffffffff; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ unsigned_long_int_result = 0x3f8000003f800000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bd80bd8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80bd8; ++ unsigned_long_int_result = 0x0bd80bd80bd80bd8; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x8); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xb); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000100c6ffef10c; ++ unsigned_int_result = 0x00000000000000ff; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f; ++ unsigned_int_result = 0x0000000020202020; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ int_result = 0x0000000000003a24; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = 
__lsx_vpickve2gr_w (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ unsigned_int_result = 0x00000000000000ff; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x9); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xb); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffe080f6efc100f7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xefd32176ffe100f7; ++ int_result = 0x0000000000002176; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ long_int_result = 0x0000000000000000; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ int_result = 0x0000000000000002; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80008000ec82ab51; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000800089e08000; ++ int_result = 0x0000000089e08000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fdec000000000; ++ int_result = 0x000000001d6e5000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fdec000000000; ++ int_result = 0x0000000001d6e5000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112; ++ int_result = 0x000000009c0d6112; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ unsigned_int_result = 0x000000000000857a; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49; ++ int_result = 0x00000000ffff8a35; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ unsigned_int_result = 0x000000000000001e; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112; ++ int_result = 0x000000009c0d6112; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ unsigned_int_result = 0x000000000000857a; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49; ++ int_result = 0x00000000ffff8a35; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ unsigned_int_result = 0x000000000000001e; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x8); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x000000000000001e; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_int_result = 0x0000000000000000; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ long_int_result = 0x000000003ddc5dac; ++ long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, long_int_result, long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fdec000000000; ++ int_result = 0x000000001d6e5000; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ int_result = 0x00000000ffffffff; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112; ++ int_result = 0x000000009c0d6112; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ unsigned_int_result = 0x000000000000857a; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1); ++ ASSERTEQ_int (__LINE__, int_out, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49; ++ int_result = 0x00000000ffff8a35; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae; ++ unsigned_int_result = 0x000000000000001e; ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0674c8868a74fc80; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906; ++ int_result = 0x00000000090b0906; ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_result = 0x0000000000000000; ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x3); ++ ASSERTEQ_int (__LINE__, int_result, int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ unsigned_int_result = 0x00000000000000ff; ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xc); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ unsigned_long_int_result = 0x0000000000000000; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f00004f4f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f00004f4f0000; ++ unsigned_int_result = 0x000000004f4f0000; ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000120000000d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e; ++ unsigned_long_int_result = 0x0000000e0000000e; ++ unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0); ++ ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c +new file mode 100644 +index 000000000..ef0ad676e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c +@@ -0,0 +1,20 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ 
return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c +new file mode 100644 +index 000000000..a5f02b1b1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c +@@ -0,0 +1,212 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ int_op0 = 0x0000000059815d00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000400; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800000; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000020202020; ++ *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020202020; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x000000007ff00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007ff00000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007ff00000; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ long_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_d (long_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x000000000000001e; ++ *((unsigned long *)&__m128i_result[1]) = 0x1e1e1e1e1e1e1e1e; ++ *((unsigned long *)&__m128i_result[0]) = 0x1e1e1e1e1e1e1e1e; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_w (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_h (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_op0 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplgr2vr_b (int_op0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c +new file mode 100644 +index 000000000..463adb48e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c +@@ -0,0 +1,300 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) 
= 0x0000000000000000; ++ int_op1 = 0x00000045eef14fe8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ac; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000; ++ int_op1 = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0xff000000ff000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe0404041c0404040; ++ int_op1 = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_result[0]) = 0xe0404041e0404041; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffff0001; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ int_op1 = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000020006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffb4ff; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffb4ff; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000020202020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x000000007ff00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000020006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0xfffffffffffffff4; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff4; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffff00ff00ff00; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ int_op1 = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b; ++ int_op1 = 0xffffffff89e08000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001b0000001b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001b0000001b; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7fefe; ++ int_op1 = 0xffffffff9c0d6112; ++ *((unsigned long *)&__m128i_result[1]) = 0xbffefdfebffefdfe; ++ *((unsigned long *)&__m128i_result[0]) = 0xbffefdfebffefdfe; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff800000ff800000; ++ __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf; ++ int_op1 = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fbf0fbf0fbf0fbf; ++ *((unsigned long *)&__m128i_result[0]) = 0x0fbf0fbf0fbf0fbf; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0x00000000090b0906; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ int_op1 = 0xffffffffffff8a35; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x05dfffc3ffffffc0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000047fe2f0; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000047fe2f0; ++ __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffe011df03e; ++ int_op1 = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf03ef03ef03ef03e; ++ *((unsigned long *)&__m128i_result[0]) = 0xf03ef03ef03ef03e; ++ __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c +new file mode 100644 +index 000000000..a81be76f1 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c +@@ -0,0 +1,293 @@ ++/* { dg-do run } */ ++/* { 
dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000055555501; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000005555555554; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x42a0000042a02000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd705c77a7025c899; ++ *((unsigned long *)&__m128i_result[1]) = 0xedfaedfaedfaedfa; ++ *((unsigned long *)&__m128i_result[0]) = 0xedfaedfaedfaedfa; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000300000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000a0a08000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5350a08000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80010009816ac5de; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8001000184000bd8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0bd80bd80bd80bd8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0bd80bd80bd80bd8; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1149a96eb1a08000; ++ *((unsigned long *)&__m128i_result[1]) = 0xb1a08000b1a08000; ++ *((unsigned long *)&__m128i_result[0]) = 0xb1a08000b1a08000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = 
__lsx_vreplvei_h (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffcc9a989a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_result[1]) = 0xadadadadadadadad; ++ *((unsigned long *)&__m128i_result[0]) = 0xadadadadadadadad; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a753500a9fa0d06; ++ *((unsigned long *)&__m128i_result[1]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_result[0]) = 0x0d060d060d060d06; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c +new file mode 100644 +index 000000000..c42440cea +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2001240128032403; ++ *((unsigned long *)&__m128i_op1[0]) = 0x288b248c00010401; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffdfffefffff7ffe; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5); ++ *((unsigned long *)&__m128i_op0[1]) = 0x2700000000002727; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000002727; ++ *((unsigned long *)&__m128i_op1[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd705c77a7025c899; ++ *((unsigned long *)&__m128i_result[1]) = 0xc9c00000000009c9; ++ *((unsigned long *)&__m128i_result[0]) = 0x0013938000000000; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100100000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000000020000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200200000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x9f009f009f009f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x9f009f009f009f00; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x000000004fc04f81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000004fc04f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000004fc04f80; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000958affff995d; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000de0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7bffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xf7ffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xf7feffffffffffff; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0ba00ba00ba00ba0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0ba00ba00ba011eb; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf1819b7c0732a6b6; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffb9917a6e7fffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x05d0ba0002e8802e; ++ *((unsigned long *)&__m128i_result[0]) = 0xd005e802174023d6; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000691a6c843c8fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x000691a6918691fc; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2; ++ *((unsigned long *)&__m128i_result[1]) = 0xc000000fc0003fff; ++ *((unsigned long *)&__m128i_result[0]) = 0xbffffff0ffffc00f; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[1]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_result[0]) = 0xffefffefffefffef; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001010002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010002; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4e3e133738bb47d2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x9c7c266e71768fa4; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001f2f2cab1c732a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000014414104505; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1011050040004101; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a323b5430048c; ++ *((unsigned long *)&__m128i_result[0]) = 0x008f792cab1cb915; ++ __m128i_out = __lsx_vrotr_b 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001e03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001f2f2cab1c732a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000780c00000; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00020000ffff0001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000b000b000b000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000b000b000b; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0005840100000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0005847b00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x636363633f3e47c1; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41f8e080f1ef4eaa; ++ *((unsigned long *)&__m128i_result[1]) = 0xa000308000008002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0500847b00000000; ++ __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c +new file mode 100644 +index 000000000..4ae4dbf8b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c +@@ -0,0 +1,294 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000000020000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0d1bffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd915e98e2d8df4d1; ++ *((unsigned long *)&__m128i_result[1]) = 0xd0b1ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x9d519ee8d2d84f1d; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0x887c8beb969e00f2; ++ *((unsigned long *)&__m128i_result[0]) = 0x101f8b680b6f8095; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2); 
++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800000008000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800000008000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000c00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffeff400000df4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff03fe; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe9df0000e81b; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000de00003e14; ++ *((unsigned long *)&__m128i_result[0]) = 0x00012b15ffff32ba; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80001b155b4b0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x80001b155b4b0000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x1c); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffefffff; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111311111114111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111311111112111; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff800000003; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001f80007fff80; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe1ffff801f7f; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff0000ffff0000f; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff02d060; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff02d060; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x27b9331b8e77ead9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x58d6bf1867ace738; ++ *((unsigned long *)&__m128i_result[1]) = 0xe4cc6c9edfab6639; ++ *((unsigned long *)&__m128i_result[0]) = 0x5afc6163b39ce19e; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vrotri_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c +new file mode 100644 +index 000000000..1bc27c983 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3c992b2e; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff730f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff3c992b2e; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff730f; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_result[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001021; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001021; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80808080806b000b; ++ __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x3c5fffffff7fffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffeff00feff; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x40f3fa0000000000; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000008a0000008a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000008900000009; ++ *((unsigned long *)&__m128i_op1[1]) = 0x63637687636316bb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x6363771163631745; ++ *((unsigned long *)&__m128i_result[0]) = 0x636363ec6363636c; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fefefe68; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vsadd_w 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x028c026bfff027af; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f7fff003f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7fff003f800000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000820202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fe01fc0005fff4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000820205a44; ++ *((unsigned long *)&__m128i_result[0]) = 0x013bc084078278b5; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000140001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000140001; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x67eb85b0b2ebb001; ++ *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff14eb54ab; ++ *((unsigned long *)&__m128i_result[0]) = 0x14ea6a002a406a00; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xce9035c49ffff570; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0xce9035c49ffff574; ++ __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000040d; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000100; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001000000ff; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000002fffffffb; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000fffb; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c +new file mode 100644 +index 000000000..67d189991 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c +@@ -0,0 +1,345 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b; ++ *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffebd06fffe820c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7ffe7fff3506; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffebd06fffe820c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7ffe7fff3506; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0cffffff18; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefffefffeff6a0c; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff60ca7104649; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff790a15db63d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffff60ca710464a; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff790a15db63e; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x5fff5e97e2ff5abf; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefffefffefffeff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000100010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001001100110068; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0xfeffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffb81a6f70; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d48eaa1a2; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffb81ae0bf; ++ *((unsigned long *)&__m128i_result[0]) = 0x00012c9748eaffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000d0000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x8006000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8002000d00000014; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000600007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000008ffffa209; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000600007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000008ffffa209; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x636363633f3e47c1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e080f1ef4eaa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000807bf0a1f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000800ecedee68; ++ *((unsigned long *)&__m128i_result[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e880ffffffff; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ebd20000714f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000e29e; ++ *((unsigned long *)&__m128i_result[0]) = 0x000259140000ffff; ++ __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffeffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffeffffffff; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be55700b5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00040003ff83ff84; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00040003ff4dffca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c07e181ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x3430af9effffffff; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffa8ff9f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffabff99; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000100000002007d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000020001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00010000ffab001c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffffffadff9a; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800080008000800; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc110000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc00d060000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xc110000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff7fffffff; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfbfbfb17fbfb38ea; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long *)&__m128i_result[1]) = 0xfbfbfb17fbfb3919; ++ *((unsigned long *)&__m128i_result[0]) = 0xfbfb47fbfbfb042d; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808081; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x80808080ffffffff; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00123fff00120012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0012001200120012; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00123fff00120012; ++ *((unsigned long *)&__m128i_result[0]) = 0x001200120017004c; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_result[0]) = 0xc5c534920000c4ed; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000aa822a79308f6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03aa558e1d37b5a1; ++ *((unsigned long *)&__m128i_result[1]) = 0x00155044ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x03aa558e2584c86f; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_result[0]) = 0x030298a6a1030a49; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007a8000000480; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000485000004cc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007a8000000480; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000485000004cc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000090a00000998; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004eff6200d2ff76; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff70002800be00a0; ++ *((unsigned long *)&__m128i_result[1]) = 0x004eff6200d2ff76; ++ *((unsigned long *)&__m128i_result[0]) = 0xff70002800be00a0; ++ __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c +new file mode 100644 +index 000000000..cd8eefb47 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c +@@ -0,0 +1,231 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf000000000000000; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x03ff0101fc010102; ++ *((unsigned long *)&__m128i_result[0]) = 0x03fffffffc010102; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsat_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff8383ffff7d0d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe000ffff1fff; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8da00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00ffff00; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000010001; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0001000000010001; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000c000ffffc000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0038d800ff000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fffe00fffffe00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0038f000ff000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fffe00fffffe00; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f0000003f0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f0000003f0000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0674c886fcba4e98; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906; ++ *((unsigned long *)&__m128i_result[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0ffc0003f003f; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x007fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x007fffffffffffff; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000017f0a82; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000003f; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3); ++ *((unsigned long *)&__m128i_op0[1]) = 0x8006000080020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8004000080020000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff8fffffff8; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff8fffffff8; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff00000000f; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ff8; ++ __m128i_out = __lsx_vsat_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_d (__m128i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git 
a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c +new file mode 100644 +index 000000000..31e3919bf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c +@@ -0,0 +1,272 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff1739ffff48aa; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff2896ffff5b88; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f3f17393f3f3f3f; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f3f283f3f3f3f3f; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010100000000; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffcc000b000b000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000b000b010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f7f000b000b000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x000b000b010a000b; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000068; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffcd63ffffcd63; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffd765ffffd765; ++ *((unsigned long *)&__m128i_result[1]) = 0x1f1f1f1f1f1f1f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f1f1f1f1f; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000120000000d; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f00000000003f; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f000000000000; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000006de1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5f9ccf33cf600000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0007000700070000; ++ __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003fff00003fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003fff00003fff; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a323b5430048c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x008f792cab1cb915; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a323b00ffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x008f792c00ffffff; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x20); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x3e); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636389038903; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636389038903; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000001ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000001ffff; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x22); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000001fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000001fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa8a74bff9e9e0070; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9e9e72ff9e9ff9ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vsat_du (__m128i_op0, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c +new file mode 100644 +index 000000000..4362941ab +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c +@@ -0,0 +1,470 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ed0008005e00a2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007a007600150077; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ed0008005e00a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007a007600150077; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff7f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2d1da85b7f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1fc000001fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1fc000001fc00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000067400002685; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9795698585057dec; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x87f82867431a1d08; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1149a96eb1a08000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffe1ffffffe1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m128i_op1[0]) = 0x363d753d50155c0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f0f0f0f00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000adadadad; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5b5b5b5aadadadad; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000052525253; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00ffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00ffffffffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x33f5c2d7d9f5d800; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe4c23ffb002a3a22; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000044470000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000005c000000b2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000007600000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4d1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000048; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffeffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000090900000998; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00ffffff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f7f007f7f7f00; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf2c97aaa7d8fa270; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0b73e427f7cfcb88; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff01fe03ff01fe03; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c +new file mode 100644 +index 000000000..c16a291de +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c +@@ -0,0 +1,328 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000c3080002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfedb27095b6bff95; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040000000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010000000100000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000100000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000001000f00fe00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000017fff00fe7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, 9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, -12); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff01ff010000ff7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffa6ff91fdd8ef77; ++ *((unsigned long *)&__m128i_op0[0]) = 0x061202bffb141c38; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w 
(__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fef01000f27ca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, -1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff80ff00ff80ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001a0000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_b (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000002a001a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000001a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05f5e2320605e1e2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_d (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff2356fe165486; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5efeb3165bd7653d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vseqi_w (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vseqi_h (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c +new file mode 100644 +index 000000000..4e7fcc02b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c +@@ -0,0 +1,394 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, 
__m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000007f00000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000401000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long *)&__m128i_result[0]) = 0x0404040404000404; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x418181017dfefdff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff81; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op2[1]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00adadad00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00adadad00000000; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xedededededededed; ++ *((unsigned long *)&__m128i_result[0]) = 0xedededededededed; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x04040403fafafafc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff80; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000001a0000000b; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000080000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff6cffb5ff98ff6e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd7ff8dffa4ff7a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_op1[0]) = 0xee297a731e5c5f86; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000868686868686; ++ __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 
0x000d000d000d000d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000d000d000d000d; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_op2[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0909000009090000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0909000009090000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x002a05a2f059094a; ++ *((unsigned long *)&__m128i_op2[0]) = 0x05ad3ba576eae048; ++ *((unsigned long *)&__m128i_result[1]) = 0x0909e0480909e048; ++ *((unsigned long *)&__m128i_result[0]) = 0x0909e0480909e048; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffff29; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m128i_op2[0]) = 0x00000001ffffff29; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff2900000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op2[0]) = 0x010101fe0101fe87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101fe870101fe87; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101fe8700000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x2000002000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2000002020000020; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op2[1]) = 0x8000000100000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x8000000000000103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010300000103; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010300000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xada4808924882588; ++ *((unsigned long *)&__m128i_op0[0]) = 0xacad25090caca5a4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op1[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xdfa6e0c6d46cdc13; ++ *((unsigned long *)&__m128i_op0[0]) = 0x21fc7081ec69b5f2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002c002400; ++ *((unsigned long *)&__m128i_op2[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0xffffffff0015172b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff0015172b; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0015172b; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0003000f0003000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op2[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_op2[0]) = 0x030298a6a1030a49; ++ *((unsigned long *)&__m128i_result[1]) = 0x021b7d24c9678a35; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f7f00007f7f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f80807f7f8080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fffe0000fffe; ++ *((unsigned long *)&__m128i_op2[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffff10000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c +new file mode 100644 +index 000000000..cd441b841 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c +@@ -0,0 +1,348 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xc9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004007c00fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x047c0404fc00fcfc; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x8a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff7f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x85); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff51cf8da; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffd6040188; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff01018888; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x50); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007d00c50177ac5b; ++ *((unsigned long *)&__m128i_op0[0]) = 0xac82aa88a972a36a; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000c5ac01015b; ++ *((unsigned long *)&__m128i_result[0]) = 0xaaacac88a3a9a96a; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x7c); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000a0000000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000a00000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a000a0a0a00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0009090900; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003f8000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003f800000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xd2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x6c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x81); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000dffff000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x965f5e9660e25a60; ++ *((unsigned long *)&__m128i_result[0]) = 0xff7f7fffff7f7fff; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x131211101211100f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x11100f0e100f0e0d; ++ *((unsigned long *)&__m128i_result[1]) = 0x13101213120f1112; ++ *((unsigned long *)&__m128i_result[0]) = 0x110e1011100d0f10; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xcb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000110; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000431f851f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001011010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000043431f1f; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xf0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xd1c0c0a5baf8f8d3; ++ *((unsigned long *)&__m128i_result[0]) = 0xecbbbbc5d5f3f3f3; ++ 
__m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000454ffff9573; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000454ffff9573; ++ __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xa4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xf3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xd2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x007c000d00400000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000003f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007c00000040; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xb9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff0000; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xcd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x93); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f007f00007f7f; ++ __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x58); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080808000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080808000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x8b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7fefe; ++ *((unsigned long *)&__m128i_result[1]) = 0xfef7fefebffefdfe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefefefdfefefeef; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x002a001a001a000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000002a001a; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a000b00000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x78); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x98); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000010f8000081a2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000069bb00000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001000010f8; ++ __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x44); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000fffff800; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x8a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffda6e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffe3d6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xeeb1e4f4bc3763f3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6f5edf5ada6fe3d7; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffe3d6; ++ *((unsigned long *)&__m128i_result[0]) = 0xeeb1e4f4bc3763f3; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100200001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100200001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xce23d33e43d9736c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x63b2ac27aa076aeb; ++ *((unsigned long *)&__m128i_result[1]) = 0x63b2ac27aa076aeb; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xc8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xc9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xbf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x801d5de0000559e0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77eb86788eebaf00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x89582bf870006860; ++ *((unsigned long *)&__m128i_op1[0]) = 0x89582bf870006860; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x94); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c +new file mode 100644 +index 000000000..0fb1bc18f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c +@@ -0,0 +1,425 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00003f803f800100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x870968c1f56bb3cd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf000e001bf84df83; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff8e001ff84e703; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ca354688; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff35cab978; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6a57a30ff0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe00fe00fe00fd01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fffefe0100f6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100010000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100010000010000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000183fffffe5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000400000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff3d06ffff4506; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe7ffff800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff6fff6fff6fff6; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3f80000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3f80000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff8fff8fff8fff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52525252525252cb; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52525252525252cb; ++ *((unsigned long *)&__m128i_result[1]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_result[0]) = 0xaeaeaeaeaeaeae35; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x370bdfec00130014; ++ *((unsigned long *)&__m128i_result[0]) = 0x370bdfec00130014; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002020002020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x021f3b0205150600; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000300400002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000100010040fffb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000300400002; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010040fffb; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff801c9e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000810000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x008003496dea0c61; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101030100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ab6021f72496458; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7750af4954c29940; ++ *((unsigned long *)&__m128i_result[1]) = 0xe64afee18eb79ca8; ++ *((unsigned long *)&__m128i_result[0]) = 0x89b051b7ac3e67c0; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x181b2541ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff81010102; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000045340a6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000028404044; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x67eb85af0000b000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000103; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000034; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003ffffe00800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004001be00dc008e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff0100010001; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x04faf60009f5f092; ++ *((unsigned long *)&__m128i_op1[0]) = 0x04fafa9200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfc06066e00000000; ++ __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000100020002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000100020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000100020002; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffe1ffc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffe1ffc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe1ffc0; ++ __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c +new file mode 100644 +index 000000000..a26eb0a3d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c +@@ -0,0 +1,290 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int 
int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x004200a000200000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000aaaaaaaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000aaab555b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000aaaaaaaa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000aaab555b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000ed0e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004080; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004cff8fffde0051; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040400000404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000040400000404; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned 
long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000501000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000008; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6b6c4beb636443e3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0507070805070708; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000085af0000b000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00017ea200002000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100000001000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c +new file mode 100644 +index 000000000..15c6cedc2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c +@@ -0,0 +1,444 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0005000400000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0400001001150404; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0005000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0400001001150404; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000008680f1ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0280000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffffff00000000; ++ __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000036280000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x42a0000042a02000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff80000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff0600d50e9ef518; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffefffa8007c000f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000001faea9ec; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100007f01; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfbfbfb17fbfb38ea; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfbfb47fbfbfb0404; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000005fffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000bffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001000100010c410; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffcafff8ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a0; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000ed0e0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000004080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000ed0e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004080; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x8); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; 
++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003030000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00fffbfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01ff1100000048; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c +new file mode 100644 +index 000000000..0e72a33dd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c +@@ -0,0 +1,258 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffff00ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffff00ffff; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffffff00ffffff; ++ __m128i_out = __lsx_vslei_b (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00008080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x31dc2cc1bc268c93; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c4d53d855f89514; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_h (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, -7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_w (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000001fc1a568; ++ *((unsigned long *)&__m128i_op0[0]) = 0x02693fe0e7beb077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -4); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf1819b7c0732a6b6; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffb9917a6e7fffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf03ef03ef03ef03e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf03ef03ef03ef03e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c +new file mode 100644 +index 000000000..685a1bb36 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c +@@ -0,0 +1,293 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd82480697f678077; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1268f057137a0267; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0048137ef886fae0; ++ *((unsigned long *)&__m128i_result[1]) = 0xff000000ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff0000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000202fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff00ff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1c); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff7a53; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001b4a00007808; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 
0xffff0000ffff0000; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003900; ++ *((unsigned long *)&__m128i_op0[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000f0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00250023001c001d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x309d2f342a5d2b34; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslei_du (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c +new file mode 100644 +index 000000000..7b8ad7d5a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c +@@ -0,0 +1,254 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0xb9884ab93b0b80a0; ++ *((unsigned long *)&__m128i_result[0]) = 0xf11e970c68000000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000100010001; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00307028003f80b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040007fff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffc0ffffff81; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff008000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0060e050007f0160; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040007fff800000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fffffff80000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003ffd000a4000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffcffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fffd000a0000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf000800080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000a00028004000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6b9fe3649c9d6363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363bc9e8b696363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6b9fe3649c9d6363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363bc9e8b696363; ++ *((unsigned long *)&__m128i_result[1]) = 0xb9fe3640e4eb1b18; ++ *((unsigned long *)&__m128i_result[0]) = 0x800000005b4b1b18; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80001b155b4b0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00006c82; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00009b140000917b; ++ *((unsigned long *)&__m128i_result[1]) = 0x80000000fffffffc; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xb150000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff7e00000081; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x18e2184858682868; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff02d060; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff02d060; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff02d06000000000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000200000001c; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000020000000c0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000020000000c0; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c +new file mode 100644 +index 000000000..7a77e80c0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c +@@ -0,0 +1,293 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, 
__m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfd293eab528e7ebe; ++ *((unsigned long *)&__m128i_result[1]) = 0xf6e91c0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x51cfd7c000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff0ffe04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc39fffff007fffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0e7ffffc01fffffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000003f803f4; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = 
__lsx_vslli_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff00ffff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfc00fcfc00fc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcfcfcfcfc00; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000060; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f00f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000f00f; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000060000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x61608654a2d4f6da; ++ *((unsigned long *)&__m128i_result[1]) = 0xfee0000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xc2c00ca844a8ecb4; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x36); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf0003000f0003000; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff800fff01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0xffffffff001ffe02; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4; ++ *((unsigned long *)&__m128i_result[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff7fffffff7; ++ *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcdcfcfcfcdc; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xc0c0c0c0c0c0c0c0; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0c0c0c0c0c0c0c0; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_result[1]) = 0x89582bf870006860; ++ *((unsigned long *)&__m128i_result[0]) = 0x89582bf870006860; ++ __m128i_out = __lsx_vslli_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x841f000fc28f801f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x107c003c083c007c; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff9727ffff9727; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffe79ffffba5f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff972700000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffba5f00000000; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x20); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x101b0330eb022002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x030220020310edc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080800080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080008000; ++ __m128i_out = __lsx_vslli_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf0000000f0000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslli_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_result[1]) = 0x05dfffc3ffffffc0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000047fe2f0; ++ __m128i_out = __lsx_vslli_d (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c +new file mode 100644 +index 000000000..796e88cad +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c +@@ -0,0 +1,244 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fc00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000fc00; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffeb48e03eab7ebe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0fac01200f800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f80eac01f80ef80; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000e7e20468; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc2fac2fa53e7db29; ++ *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4; ++ *((unsigned long *)&__m128i_result[0]) = 0x00a6ffceffb60052; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x002e0059003b0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000005c000000b2; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007600000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x017001a002c80260; ++ *((unsigned long *)&__m128i_result[0]) = 0x01d8000000000000; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x09e009e009e009e0; ++ *((unsigned long *)&__m128i_result[0]) = 0x09e009e009e009e0; ++ __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040000000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0505000005050505; ++ *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0028280000282800; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000000; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff00; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001918000017160; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001514000013120; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff60ca7104649; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff790a15db63d; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffc00ffde4000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfe857400fed8f400; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1c6c80007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0038d800ff000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fffe00fffffe00; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff800000000000; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000007fff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff80000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000001fffe; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040004000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010002000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000017fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x003fffffff800000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x57f160c4a1750eda; ++ *((unsigned long *)&__m128i_result[1]) = 0x000002bf8b062000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffd0ba876d000; ++ __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c +new file mode 100644 +index 000000000..5f46293dc +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c +@@ -0,0 +1,189 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003f803f800100; ++ 
__m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0014000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f807f807f807f80; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020006000200060; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080805; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080805; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020002000200014; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000201fe01fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000201fe01fc; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff1affff01001fe0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d02834d70; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f800d007f803680; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100418026803800; ++ __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007658000115de0; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a8960001d2cc0; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffff000000ff00; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000ffff0000ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040600000406; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020202020202fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020200000202000; ++ *((unsigned long *)&__m128i_result[0]) = 0x002020000fe02000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000001ffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131; ++ *((unsigned long *)&__m128i_result[1]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0313100003131000; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000900000009; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000090; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000090; ++ __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000020000007d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000001f400000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000280000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fef01000e27ca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001fde020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001c4f940000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ffffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffff00; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000fffffffe000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000102020204000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86; ++ *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001ce28f9c0; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000004e06b0890; ++ __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c +new file mode 100644 +index 000000000..15c96ccfe +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c +@@ -0,0 +1,434 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007658000115de0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a8960001d2cc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ffff0000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff00ff0000ff; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000040100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000384; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe3f0200004003ffd; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff00ff00ff00; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f0101070101010f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000127f010116; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffffff; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000ffef0010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000ff0000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff02000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000002a001a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a000b00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff001a00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffc0ffc0003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffc0ffc0003f003f; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff0000000000ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffefefffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff000086bd; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ca000000c481; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00050eb00000fffa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000f8a50000f310; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000011f0000f040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0177fff0fffffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff8bc; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001000100010c410; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x800000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ffffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op0[0]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001f5400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002008360500088; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000400028000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x00000000467fef81; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c63636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4eede8494f000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0fff0fff7f800fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c +new file mode 100644 +index 000000000..e8d69f0e9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c +@@ -0,0 +1,236 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0007658000115de0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001a8960001d2cc0; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff; ++ __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffff359f358; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffff00ff00; ++ __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fffe0000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007658000115de0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a8960001d2cc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000320; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007730; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4050000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636163636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000145ad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000300003e6e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00005dcbe7e830c0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03f21e0114bf19da; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5e695e95e1cb5a01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0313100003131000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000010a7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000046ebaa2c; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf1f1f1f1865e65a1; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00050eb00000fffa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000f8a50000f310; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c +new file mode 100644 +index 000000000..5bf3ce6e8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c +@@ -0,0 +1,328 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, 
0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00feff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00feff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffff0000000000; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, 5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, 8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_b (__m128i_op0, -12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x807f7f8000ffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00feff00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ 
__m128i_out = __lsx_vslti_h (__m128i_op0, 10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a3a3a3b3a3a3a3a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3a3a00003a3a0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_h (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5ff6a0a40ea8f47c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5ff6a0a40e9da42a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd82480697f678077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffe15; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffe15; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, -5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000100040001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00010002ffff0105; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa000308000008002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0500847b00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vslti_w (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000005e695e95; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5e695e96c396b402; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0103000201030002; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, 7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, 14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_d (__m128i_op0, -13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c +new file mode 100644 +index 000000000..768df528f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c +@@ -0,0 +1,293 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff0000ffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff0000ffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000008a0000008a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000008900000009; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) 
= 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000ffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbe8282a0793636d3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x793636d3793636d3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7505445465593af1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00010000ffab001c; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0001ffffffadff9a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff7300000ca00430; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001a00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000009c83e21a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000022001818; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x975ca6046e2e4889; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000235600005486; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000b31600006544; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000007e8a60; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001edde; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0aa077b7054c9554; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40c7ee1f38e4c4e8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6b75948a91407a42; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0b5471b633e54fde; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vslti_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c +new file mode 100644 +index 000000000..fd7c22a82 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c +@@ -0,0 +1,344 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ed0008005e00a2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007a007600150077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0003000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0007007f03fe0000; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe001ffffe001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe001ffffe001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fc000003fc00000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fc000003fc00000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fffc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd83c8081ffff8080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x85bd6b0e94d89998; ++ *((unsigned long *)&__m128i_result[0]) = 0xd83c8081ffff8080; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbddaa86803e33c2a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_op1[0]) = 0xbddaa86803e33c2a; ++ *((unsigned long *)&__m128i_result[1]) = 0xff0600d50e9ef518; ++ *((unsigned long *)&__m128i_result[0]) = 0xffefffa8007c000f; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xaaaaffebcfb748e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfd293eab528e7ebe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefff6fff80002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xe0404041e0404041; ++ *((unsigned long *)&__m128i_op1[0]) = 0x803f800080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000700ff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040004000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000700ff00000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000820000ff81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff810000ff81; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000820000ff81; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff810000ff81; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x800080007f008000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a0aa9890a0ac5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffff000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d001b25; ++ *((unsigned long *)&__m128i_op1[0]) = 0x191817161514131d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001e8e1d8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000e400000001; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000040002; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x801d5de0000559e0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x77eb86788eebafe1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffac00000000; ++ __m128i_out = 
__lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcfcfcfc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_result[0]) = 0x5252525252525252; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0802080408060803; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001fffe0001fff; ++ __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000047fe2f0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fec20704; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000043fe2fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000001fffff; ++ __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c +new file mode 100644 +index 000000000..2ca4f0b7a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c +@@ -0,0 +1,258 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, 
__m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ca354688; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000007; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffc0ffff003f; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf6e91c0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x51cfd7c000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffd000700000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0014fff500000000; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3c600000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0f180000ffe00000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21f32eaf5b7a02c8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x407c2ca32cbd0357; ++ *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4; ++ *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x01ff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x01ff000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1268f057137a0267; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0048137ef886fae0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000490000004d; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffffe2; ++ __m128i_out = __lsx_vsrai_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ffffffffff; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe80; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001800000039; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000049ffffffaa; ++ *((unsigned 
long *)&__m128i_result[1]) = 0x000000060000000e; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000127fffffea; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0aa077b7054c9554; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40c7ee1f38e4c4e8; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w (__m128i_op0, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fff3fff3fff3fff; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002ebf; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_w (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000190; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000100010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00f0001000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00f0001000000010; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c +new file mode 100644 +index 000000000..4e7c7ab7e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c +@@ -0,0 +1,290 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0001fffe; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0303020102020001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000201; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd82480697f678077; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0301020100000004; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff02; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3c5fffffff7fffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefffeff00feff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000f0080000f800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000e0180000e810; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000f0080000f800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000f0f800; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff00000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100089bde; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x80044def00000001; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000100f8100002; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0ff8006f0f950; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff7a53; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000021e79364; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000718ea657431b; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfefffffffeffda6f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfefffffffeffe3d7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff0000ff86; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101fe870101fe87; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101fe8700000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x353c8cc4b1ec5b09; ++ *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808000000035; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff80ff00ff80ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffe8081000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff000ff6220c0c1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffe8081000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xb110606000000000; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0037ffd40083ffe5; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001e0052001ffff9; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00df020f0078007f; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ffa2fff0ff74; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff76ffd8ffe6ffaa; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffc105d1aa; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbc19ecca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffe03ff63ff9bf; ++ __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x06d9090909090909; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0039d21e3229d4e8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6d339b4f3b439885; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000db24848; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe3bfb01fe3bfe01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe03fe3ffe01fa21; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vsran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c +new file mode 100644 +index 000000000..92988035d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c +@@ -0,0 +1,246 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005000501800005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x438ff81ff81ff820; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000043; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x78); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002020202; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000009; ++ *((unsigned long *)&__m128i_op1[1]) = 0x697eba2bedfa9c82; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd705c77a7025c899; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x03fdfffcfefe03fe; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000010001000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff00ff00ffffff; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1e0200001e020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0800080008000800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040004000400040; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffce00016fb41; ++ *((unsigned long *)&__m128i_op0[0]) = 0x57cb857100001a46; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000150000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffeffff001effff; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x1); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2020202020207fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x01010101010101ff; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff082f000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00005dcbe7e830c0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03f21e0114bf19da; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000003f200001e01; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000014bf000019da; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005fe0300010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100010001; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x62cbf96e4acfaf40; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf0bc9a5278285a4a; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x62cbf96e4acfaf40; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x40); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffb6d01f5f94f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001f50000; ++ __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x808080e280808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080636380806363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080808080638063; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x63); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f07697100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000076971000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000003020302; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff81; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x58); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128i_op0[0]) = 0x110053f401e7cced; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5847b72626ce61ef; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005847b00011005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005847b00000000; ++ __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c +new file mode 100644 +index 000000000..6a842d9ce +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c +@@ -0,0 +1,354 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, 
__m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffff02fff4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000003f; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80010001b57fc565; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8001000184000be0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x80010001b57fc565; ++ *((unsigned long *)&__m128i_result[0]) = 0x8001000184000be0; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000958affff995d; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0fffff000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0fffff000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff6080ffff4417; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000003a0000003a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003a0000003a; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0086000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0082000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0086000000040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0082000000000007; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x467f6080467d607f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_result[1]) = 0x001bffe4ebff9400; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80000000000000; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000004442403e4; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100010001000100; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc00000ff800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7ffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffe4866c86; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe4866c86; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000002000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000002000000; ++ __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1748c4f9ed1a5870; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc89d7f0ff90da019; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x680485c8b304b019; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc89d7f0ff90da019; ++ *((unsigned long *)&__m128i_result[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff913bfffffffd; ++ __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c +new file mode 100644 +index 000000000..2a353d65a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c +@@ -0,0 +1,265 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000cb4a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000f909; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b 
(__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff5fff4002ffff5; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc0ff81000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff0ffe04000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000f3; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000f3; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fdfc0000fd03; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000017161515; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000095141311; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000109000000c9; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6; ++ *((unsigned long *)&__m128i_result[1]) = 0x00f0008100800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x00f0008000800080; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000006c80031; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001200100012001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080000000800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000404040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001340134013401; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001340134013401; ++ __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c +new file mode 100644 +index 000000000..60d474203 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c +@@ -0,0 +1,236 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefff6fff80002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0xff000000fefb0000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xc2f9bafac2fac2fa; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0204; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000001d5d4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000150d707009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x03f1e3d28b1a8a1a; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffefffefffeffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefffefffeffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff7f810100001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001fffc0ffffe001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000002259662; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc4dbe60354005d25; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f01000000f8ff00; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff6ff4ffff8db8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffbaf4ffffb805; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff4ffb800ff0080; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000044470000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00004dce00004700; ++ __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0b4c600000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x08080807f5f5f5f8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0202f5f80000ff00; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0d060d060d060d06; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0d060d060d060d06; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff01fe03ff01fe03; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01fe03ff01fe03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01fe03ff01fe03; ++ __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c +new file mode 100644 +index 000000000..3aa23bdc8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff020000fff4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff020000fff4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080007f80800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000001e5; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x5000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff8000002f4ef4a8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000f4a8; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00100184017e0032; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0086018c01360164; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffff33c4b1e67; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800c0004300c; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000e0000000e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) 
= 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x66); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0020808100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x028c026bfff027af; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000003fc03fc00; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc00a3009b000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ffa7f8ff81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000003f0080ffc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007fff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000a7f87fffff81; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffd400000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000004000000040; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000080003f80ffff; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000001fc00000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff80010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1ffffffff8001000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf0bd80bd80bd8000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xecec006c00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xecec006c00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff007f00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001ff85ffdc0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000332ae5d97330; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1ff85ffe2ae5d973; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000043c5ea7b6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000008fc4ef7b4; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000fea0000fffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x48); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x09e8e9012fded7fd; ++ *((unsigned long *)&__m128i_op0[0]) = 0x479f64b03373df61; ++ *((unsigned long *)&__m128i_op1[1]) = 0x04c0044a0400043a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x04c004d6040004c6; ++ *((unsigned long *)&__m128i_result[1]) = 0x1d20db00ec967bec; ++ *((unsigned long *)&__m128i_result[0]) = 0x00890087009b0099; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000080800000808; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080000180800001; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe000200fe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe000200fe; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000003e; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefe02fefefe02fe; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000000010000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0103000201030002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc000400000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00003fff00010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff010000ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xf359f359f359f359; ++ *((unsigned long *)&__m128i_result[0]) = 0xf359f359f359f359; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01533b5e7489ae24; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xce9135c49ffff570; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000807bf0a1f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000800ecedee68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0005840100000005; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0005847b00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001f0a20001cedf; ++ *((unsigned long *)&__m128i_result[0]) = 0x0058000000580000; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffb1fb1000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf2c97aaa7d8fa270; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0b73e427f7cfcb88; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0a545374471b7070; ++ *((unsigned long *)&__m128i_op0[0]) = 0x274f4f0648145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0xa8a736e19e9e28bf; ++ *((unsigned long *)&__m128i_result[0]) = 0x9e9f9e9f9e9f9e9f; ++ __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c +new file mode 100644 +index 000000000..f9c789855 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c +@@ -0,0 +1,389 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include 
"../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001000f000e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000fff1000ffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002a55005501; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000002a55000001; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000000fff8fff8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f800000fff8fff8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f800000fff80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x80000000fff80000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0004000000040000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff6fff6fff6fff6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000750500006541; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000100fffffefd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00f900d7003d00e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003e00d100de002b; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f4000007f040000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f0200007f020000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe000000f6; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x01010101ffffff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x01010101000000f6; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000049000000c0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffff29; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffff7f00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff007f0101017f; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff2900000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000401000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff2900000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc2f9bafac2fac2fa; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0804080407040804; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000010a000b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101080408040804; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100810080e081; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4688500046f6a000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f7fff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ffffff03ffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00013fff; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000021ffffffdf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000e60; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0202fe02fd020102; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101fe870101fe87; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101fe8700000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x61608654a2d4f6da; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fb01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000007000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fb01; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000e0000; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff003000ff00a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000100ff00fe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff003000ff00a0; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffe0000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffe0000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff; ++ __m128i_out = 
__lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000001fd02; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffe1fffffff; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000900000009; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff7fffffff7f; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff007fff810001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff800fff01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000007ff000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000056f64adb9464; ++ *((unsigned long *)&__m128i_op1[0]) = 0x29ca096f235819c2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000004399d32; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff; ++ __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c +new file mode 100644 +index 000000000..7b5e9a7bf +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c +@@ -0,0 +1,328 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, 
__m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001ffff; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000020000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000100000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000080000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000017f0a82; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x03ff03ff03ff03ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000400000004000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000400000204010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000020000010200; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000003fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003fffffff; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x2); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x37); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020002000200020; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffefffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0007000700070007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0007000700070007; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000c000c000c000c; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000003d0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000003d0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000030000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000; ++ __m128i_out = 
__lsx_vsrli_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xaa14efac3bb62636; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd6c22c8353a80d2c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000300000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3fff3fff3fff3fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000700000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffbffda; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010101; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x000001fffdfffdff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000001fffdfffdff; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_result[0]) = 0x001f2f2cab1c732a; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000290; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000290; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020000ffff0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000003030000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000002345454; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4ca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000060006; ++ __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000200000000d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003e0000003f; ++ __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c +new file mode 100644 +index 000000000..5a8f4f70a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c +@@ -0,0 +1,335 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000c77c000047cd; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000c0f100006549; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffdfff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffdfff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe00001ffe200; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffdfff; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff35cab978; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff35cab978; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010035; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x80307028ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8040007fffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0101ff010101; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4180418041804180; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000fffefffefffef; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00000000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00008bf700017052; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000f841000091aa; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe6d4572c8a5835bc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe5017c2ac9ca9fd0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000f8410000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001010001; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000100000001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0ed5ced7e51023e5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001000e51023e5; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffbfff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000017ffeffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000017ffeffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfefeff00fefeff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00c0000000800000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000071768fa4; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffdfdc0d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffdfdc0d; ++ __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c +new file mode 100644 +index 000000000..ca462c834 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c +@@ -0,0 +1,281 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003fe00ffe3fe0; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x7b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc39fffff007fffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x61cf003f0000007f; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000003c607f80; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff7f01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffe03; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe03; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff8001ffff8001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63990099; ++ *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x6363797c63990099; ++ *((unsigned long *)&__m128i_op1[0]) = 0x171f0a1f6376441f; ++ *((unsigned long *)&__m128i_result[1]) = 0x181e180005021811; ++ *((unsigned long *)&__m128i_result[0]) = 0x181e180005021811; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003fff00003fff; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf0fd800080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000a00028004000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000f000800000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x000f000000000000; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_op1[1]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_op1[0]) = 0xaeaeaeaeaeaeae35; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00000000; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002050320; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x010101017f010101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040600000406; ++ *((unsigned long *)&__m128i_result[0]) = 0x020202020202fe02; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe364525335ede000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000fff00000e36; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x601fbfbeffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfff8000000000000; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000455555555; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7c7c000000007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000001f1f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000bffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe4c8b96e2560afe9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc001a1867fffa207; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe4c8b96e2560afe9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc001a1867fffa207; ++ *((unsigned long *)&__m128i_result[1]) = 0xe2560afe9c001a18; ++ *((unsigned long *)&__m128i_result[0]) = 0xe2560afe9c001a18; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000042ab41; ++ *((unsigned long *)&__m128i_op0[0]) = 0xb1b1b1b1b16f0670; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000044470000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000080c43b700; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x30eb022002101b20; ++ *((unsigned long *)&__m128i_op1[0]) = 0x020310edc003023d; ++ *((unsigned long *)&__m128i_result[1]) = 0x022002101b200203; ++ *((unsigned long *)&__m128i_result[0]) = 0x022002101b200203; ++ __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c +new file mode 100644 +index 000000000..211339bb8 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c +@@ -0,0 +1,434 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x43e092728266beba; ++ *((unsigned long *)&__m128i_op1[0]) = 0x43d8969cc4afbf2d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc001fffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff8000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000200020002; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffff0ffe04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000101fd01fe; ++ *((unsigned long *)&__m128i_result[1]) = 0xff80ff80ff80ff80; ++ *((unsigned long *)&__m128i_result[0]) = 0xff80ff8080008000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff51cf8da; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffd6040188; ++ *((unsigned long *)&__m128i_result[1]) = 0x00020002000d0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000020f2300ee; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007f8000007f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000003fc; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000003fc; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000006; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0040000000400000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0040000000400000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0020808100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f00f; ++ *((unsigned long *)&__m128i_result[1]) = 0x111110ff11111141; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111113111111100; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000100; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfeca2eb9931; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00d3007c014e00bd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x06e1000e00030005; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0202020202020202; ++ *((unsigned long *)&__m128i_op0[0]) = 0x363d753d50155c0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe500c085c000c005; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe5c1a185c48004c5; ++ *((unsigned long *)&__m128i_result[1]) = 
0x0002020002020200; ++ *((unsigned long *)&__m128i_result[0]) = 0x021f3b0205150600; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffe000ffdf; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffe080f6efc100f7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xefd32176ffe100f7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000040000000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000040000000000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffdfe01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffdfe0200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4000000000000000; ++ __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_op0[0]) = 0xa352bfac9269e0aa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xd70b30c96ea9f4e8; ++ *((unsigned long *)&__m128i_result[0]) = 0xa352bfac9269e0aa; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158; ++ __m128i_out = __lsx_vsrlr_h 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffeff98; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x084d1a0907151a3d; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff9fffefff9ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0280000000000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0700f8ff0700f8ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000090a00000998; ++ *((unsigned long *)&__m128i_result[1]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000ef0000000003b; ++ __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0005847b00011005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0005847b00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000807bf0a1f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000800ecedee68; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005840100000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005847b00000000; ++ __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00250023001c001d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x309d2f342a5d2b34; ++ *((unsigned long *)&__m128i_result[1]) = 0x00060eb000000006; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000075c00000cf0; ++ __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c +new file mode 100644 +index 000000000..2c3a53416 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c +@@ -0,0 +1,300 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0005252800052528; ++ *((unsigned long *)&__m128i_result[0]) = 0x0005252800052528; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0200020002000200; ++ *((unsigned long *)&__m128i_result[0]) = 0x0200020002000200; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc001fffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000200000; ++ *((unsigned long *)&__m128i_result[0]) = 0x001fff8004000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000030000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00060001fffe8003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000078c00000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000078c00000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000400000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000040004000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001800390049ffaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0029ff96005cff88; ++ *((unsigned long *)&__m128i_result[1]) = 0x001800390049ffaa; ++ *((unsigned long *)&__m128i_result[0]) = 0x0029ff96005cff88; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000005151515; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000006302e00; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2000200000013fa0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000013fa0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000020000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000020000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000dc300003ffb; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000dc300003ffb; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808000000035; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00018d8e00018d8e; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000003fc00ff00; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001fe01fe00; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0); 
++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x045340a628404044; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001030103; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9611c3985b3159f5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0021b761002c593c; ++ *((unsigned long *)&__m128i_result[0]) = 0x002584710016cc56; ++ __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3; ++ __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000080801030000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000080103040000; ++ __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c +new file mode 100644 +index 000000000..c630b4261 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c +@@ -0,0 +1,164 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001ffff0001ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000383ffff1fff; ++ __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003fc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000003fc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcbc2723a4f12a5f8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffd60001723aa5f8; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x467f6080467d607f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808081; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe000e0006080b040; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010101030101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101030101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000fffa0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffa0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101000101010001; ++ __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80ffffffffff80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff80ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800010001ff8000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff01ff01ac025c87; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff01ff01ac465ca1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c +new file mode 100644 +index 000000000..468a17c15 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c +@@ -0,0 +1,686 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff8969ffffd7e2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000d688ffffbd95; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf12dfafc1ad1f7b3; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x34); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000200000002000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000c0002000c0002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000400c600700153; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000c0002000c0002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000400c600700153; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000010000007f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0800000400000800; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001515151500; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001515151500; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001515000015150; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fdfd0404; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fffffff3fffffff; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x3fffffff3fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fc08; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fc08; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffba420000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000007e044000400; ++ *((unsigned long *)&__m128i_result[0]) = 0xfdd2100000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000081e003f3f3f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3f3f3f0e00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000081e003f3f3f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f3f3f0e00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000103c007e7e8; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000103c007e7e8; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x43); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0202022302023212; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0202ff3f02022212; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002100003010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff3f00002010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x79); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe2bb5ff00e20aceb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe2bb5ff00e20aceb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00e3000e00e3000e; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf58df7841423142a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f7477f8ff4e2152; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3d3e0505101e4008; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2bd5d429e34a1efb; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfc0203fccbedbba7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc9f66947f077afd0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x89fed7c07fdf5d00; ++ *((unsigned long *)&__m128i_result[1]) = 0x14f1a50ffe65f6de; ++ *((unsigned long *)&__m128i_result[0]) = 0xa3f83bd8e03fefaf; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6ed694e00e0355db; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000010600000106; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xe00e035606000001; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xe739e7ade77ae725; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbb9013bd049bc9ec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x56aca41400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7ade77ae3bd049bd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000041400000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1010101010101010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8081808180818081; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000006ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0037f80000000000; 
++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x69); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0080808080c04040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0101010001808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000202000008081; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001010100010101; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00fff00000001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x6b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000adf0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001e00; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0040000000400040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000020002020; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001010102; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001000100010000b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03fc03fc03fc03fc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x04000400ff01ff01; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1010101010101010; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000fff800000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001ed68; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ff6a09e667f3bd8; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007b5a; ++ *((unsigned long *)&__m128i_result[0]) = 0x999fcef600000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffe5c8000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x91f80badc162a0c4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x99d1ffff0101ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff400000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x905d0b06cf0008f8; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3802f4fd025800f7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc8ff0bffff00ffae; ++ *((unsigned long *)&__m128i_op1[0]) = 0x91ff40fffff8ff50; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000200000000700; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000192000001240; ++ __m128i_out = __lsx_vsrlrni_w_d 
(__m128i_op0, __m128i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff0ffd0ffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff0ffc0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbb7743ca4c78461f; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd9743eb5fb4deb3a; ++ *((unsigned long *)&__m128i_result[1]) = 0x003fffffffc3ff44; ++ *((unsigned long *)&__m128i_result[0]) = 0x002eddd0f2931e12; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x4a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbb7743ca4c78461f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd9743eb5fb4deb3a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x22445e1ad9c3e4f0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1b43e8a30a570a63; ++ *((unsigned long *)&__m128i_result[1]) = 0x743ca4c843eb5fb5; ++ *((unsigned long *)&__m128i_result[0]) = 0x45e1ad9c3e8a30a5; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1204900f62f72565; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4901725600000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x6a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000400000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000300000003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f3f3f7fbf3fffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x47); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000040804080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000020100000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffe8ffff28fc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00007fff0000803e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000006ffff81e1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0ffffffe8ffff290; ++ *((unsigned long *)&__m128i_result[0]) = 0x000007fff0000804; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x44); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000418200000008e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002100047; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x6363636363636362; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636362; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636362; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636362; ++ *((unsigned long *)&__m128i_result[1]) = 0x0032003200320032; ++ *((unsigned long *)&__m128i_result[0]) = 0x0032003200320032; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff01010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ffdf87f0b0c7f7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf6b3eb63f6b3f6b3; ++ *((unsigned long *)&__m128i_op1[0]) = 0x363953e42b56432e; ++ *((unsigned long *)&__m128i_result[1]) = 0x010000010080000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x00f700f70036002b; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xed67d6c7ed67ed67; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6c72a7c856ac865c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000700000003; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff40ff83; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1010101010101010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000003030103; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000003030103; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000006060; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000006060; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x000002408beb26c8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000706e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000028c27; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000070; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x80000b0b80000b0b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000101080001010; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffefefffffeff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0061006100020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00fe; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000078087f08; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000078087f08; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000e0fc0000e0fc; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff0bff76; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x75); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff00ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00ffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8282828282828282; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000828282828282; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0008000800000008; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00f7000000000000; 
++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000005150; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000005150; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000f7000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x24); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41afddcb1c000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd09e1bd99a2c6eb1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe82f7c27bb0778af; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000040002; ++ *((unsigned long *)&__m128i_result[0]) = 0x000d000a000f000c; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff8000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffdff0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0144329880000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007fffc0007ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x004000004c400000; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001e0000001e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffafff0fff9ff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000d800cff8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000002000007d7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000300000ff1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x000007d700000ff1; ++ __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0xffffff00ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000ff8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000; ++ __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x74); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000f08; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x2020202020202020; ++ __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c +new file mode 100644 +index 000000000..e45ca36f0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c +@@ -0,0 +1,390 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f00000000003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000210011084; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffc000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffefffffffeff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffcff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x02b504f305a5c091; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x02b504f305a5c091; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000005602d2; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000003f80b0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xb327b9363c992b2e; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa1e7b475d925730f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) 
= 0x000000000001ff00; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0060e050007f0160; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040007fff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1268f057137a0267; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0048137ef886fae0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x75b043c4d17db125; ++ *((unsigned long *)&__m128i_op1[0]) = 0xeef8227b4f8017b1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000006f00000000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff994db09c; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc7639d96; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0fff0fff0fff0fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x9); ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f80000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x800080007f008000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000695d00009b8f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000074f20000d272; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001f5400000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00010000fffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00010000fffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x31b1777777777776; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6eee282828282829; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000006362ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff801c9e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000810000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x40eff02383e383e4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000007fff; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffb00fdfdf7ffff; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0xfff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a7480007fff8000; ++ __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000fe00fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fe00fe00fe00fe; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000f50000007500; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00007e1600007d98; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00fe00fe7fffffff; ++ __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f007f7f7f00; ++ __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c +new file mode 100644 +index 000000000..7ffcecde7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c +@@ -0,0 +1,679 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000007f7f02; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff7fffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffff7ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x64); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x47); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0004007c00fc0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f7f7f7f00107f04; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f0000fd7f0000fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00cf01fe01fe01fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000301de01fe01fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f00000000000000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe31c86e90cda86f7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000e3; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc39fffff007fffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00fd; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff0e700000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8080000180800100; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x23); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000c0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffff29; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000020000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000183fffffe5; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000080000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fefefe6a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000fbf9; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000007f8; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a000a000a000a00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f007f007f007f00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0003003f; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x007b01ec007b3a9e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fff9fff9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fff9fffa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007ffe7ffe400000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x2a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc485edbcc0000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000c485; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x21011f3f193d173b; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff39ff37ff35ff33; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h 
(__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0c0c0c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0014000100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00003f80000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffee00000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3a3a3a3b3a3a3a3a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3a3a00003a3a0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000003a0000003a; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000068; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000038003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040033; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00001fff00001fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000007ffc000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000fff0; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000004000000040; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000005e94; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005e96ffffb402; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000bd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001fc0000fffeff; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000002fffffffb; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000fffb; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000bffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x42); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x79); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000777777777777; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff7777ffff7777; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000003bbbbbbbbbb; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x45); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0007fff800000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978; ++ *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xd4bade5e2e902836; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0010001000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1000000010001000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00680486ffffffda; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff913bb9951901; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67157b5100005000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x387c7e0a133f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0c0f000a070f0204; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e3faa293c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x86dd8341b164f12b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9611c3985b3159f5; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xff86dd83ff9611c3; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1010111105050000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040000041410101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000808000020200; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2e2b34ca59fa4c88; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3b2c8aefd44be966; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x2e34594c3b000000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff1afffefec0ec85; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d48ce567f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80c400000148; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff80c1ffffe8de; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe3ffd8ffe30919; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1313131313131313; ++ *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_op1[0]) = 0xd73691661e5b68b4; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x084d1a0907151a3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000007d07fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab; ++ *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0001fffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x8000000080000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x60); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffaf1500000fffa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000f8a40000f310; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000003e2; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128i_op0[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x50); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c +new file mode 100644 +index 000000000..a23ad7cd2 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c +@@ -0,0 +1,669 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, 
__m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffd24271c4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2711bad1e8e309ed; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff80007fff; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffcb410000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffeb827ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242070db; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa478; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefd7f7f7f7f7f7e; ++ *((unsigned long *)&__m128i_op0[0]) = 0xdffdbffeba6f5543; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ffffff000000ff; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ffffff000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000002010; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff00000000000001; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242070db; ++ *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa478; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffff00; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000007ae567a3e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000700ff00000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x006f0efe258ca851; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffff00; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f00f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000007fff; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff0000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111311111114111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111311111110000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f417f417f027e03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9780697084f07dd7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x87e3285243051cf3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fea8ff44; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fea8ff44; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000008000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op0[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415; ++ *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff0015172b; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc631eb3339ce; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d197a98f2e; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000e36400015253; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000035ed0001e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000e36400015253; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000035ed0001e000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1c6c80007fffffff; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000b4a00008808; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0808080800000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc2fc0000c3040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc2fc0000c3040000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000060000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0600000100000001; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080006b00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000500000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff0000ff; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000002ffffffff; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000045340a6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000028404044; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000fffffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000102020204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x045340a628404044; ++ __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400000014; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000adad0000adad; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000052520000adad; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31; ++ *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9b509be72f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3513f2e3a1774d2c; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000501ffff0005; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0021b761002c593c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x002584710016cc56; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff0000ffff; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00020000ffff0001; ++ __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x004001be00dc008e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1f3f06d4fcba4e98; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e1135681fa8d951; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000007d07fffffff; ++ __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000008686; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00008e5680008685; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007fff7fff8000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc7f100004000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c7f14000; ++ __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4500000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4400000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000; ++ __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8a8a8a8a8a8a8a8a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c +new file mode 100644 +index 000000000..76fac97be +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c +@@ -0,0 +1,848 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff60090958; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0fa96b88d9944d42; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001802041b0013; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x72); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xb); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0200020002000200; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x3f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x5c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000000020000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xda4643d5301c4000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc1fc0d3bf55c4000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101; ++ *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00020002000d0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000020f2300ee; ++ *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x79); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000010000002b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000400000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01ff01ff01ff01ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x59); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000f0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001800390049ffaa; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0029ff96005cff88; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f0000000f000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0bef0b880bd80bd8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000017b017b01; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffe0001fffe0001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x30); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf0800320fff1fa20; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0032000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f417f417f027e03; ++ *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x60); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000065a0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9941d155f43a9d08; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c0c8b8a8b8b0b0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8b8a8a898a8a8909; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1615141315141312; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0fffff000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe00000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010001000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000080000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x58); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0d1202e19235e2bc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xea38e0f75f6e56d1; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe500ffffc085; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0xffffc000ffffc005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff00000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100080000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0400400204004002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffe080f6efc100f7; ++ *((unsigned long *)&__m128i_op0[0]) = 0xefd32176ffe100f7; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffe080f6efc100f7; ++ *((unsigned long *)&__m128i_op1[0]) = 0xefd32176ffe100f7; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x2c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005452505; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000004442403e4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x03fc03fc03fc03fc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000b4a00008808; ++ *((unsigned long *)&__m128i_result[0]) = 0x0808080800000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x71); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x2ea268972ea2966a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4026f4ffbc175bff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x5d7f5d807fea807f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff0fffffff00001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff0fffffff09515; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000003000000d612; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000bfffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000500000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80808080806b000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000c0c0c000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe1fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000080008; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1ab6021f72496458; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7750af4954c29940; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1ab6021f72496458; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7750af4954c29940; ++ *((unsigned long *)&__m128i_result[1]) = 0x6ad8ffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x6ad8ffffffffffff; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002008300500088; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000088; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000010000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1200091212121212; ++ *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000008000000080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x51); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffeff98; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0014ffe4ff76ffc4; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000011; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff86dd83ff9611c3; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000035697d4e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000013ecaadf2; ++ *((unsigned long *)&__m128i_result[1]) = 0xe280e67f00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f80; ++ __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = 
__lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x017001a002c80260; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01d8000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf02596848; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf04581ec0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x010169d9010169d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0x01010287010146a1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200000001; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_op1[0]) = 0x004d004d004d004d; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x06d9090909090909; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x48); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0039d21e3229d4e8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6d339b4f3b439885; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffff000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000000; ++ __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x2e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000100000001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x37b951002d81a921; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000075dbe982; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000071e48cca; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0ebb7d300e3c9199; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 
0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000930400008a10; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00006f9100007337; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00c2758000bccf42; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03; ++ *((unsigned long *)&__m128i_result[1]) = 0x00250023001c001d; ++ *((unsigned long *)&__m128i_result[0]) = 0x309d2f342a5d2b34; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff01ffffe41f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfff00000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000155; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000002b; ++ __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfee1f6f18800ff7f; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c +new file mode 100644 +index 000000000..ed600c72d +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c +@@ -0,0 +1,543 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8080808000008080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080000080800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x5ff6a0a40ea8f47c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x5ff6a0a40e9da42a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fffc00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001afffffff7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000750500006541; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000100fffffefd; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff6fc00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7f0000007f000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080000180800100; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ff00ffff; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x7); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefff6fff80002; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101017f0101017f; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x00005a5a00005a5a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005b5a00005b5a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x65b780a2ae3bf8ca; ++ *((unsigned long *)&__m128i_op1[0]) = 0x161d0c373c200827; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000001ff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf10cf508f904fd01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf8f8e018f8f8e810; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf8f8f008f8f8f800; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000f0009d3c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000016fff9d3d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000c000000060003; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffe0001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00003a247fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x3fbf3fbf00007fff; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000fff00000e36; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000fff0e36; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe000ffdf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0018; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, 
__m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffefffefffefffef; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0080000700000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffbffda; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3e25c8317394dae6; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ac00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c6c6c6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x64616462b76106dc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x64616462b71d06c2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00c0c000c0000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0000000c000c000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00c0c000c0000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0000000c000c000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001e001e001e001e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001700000017; ++ *((unsigned long *)&__m128i_op1[0]) = 0x59f7fd8759f7fd87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff; ++ __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffc0000000000000; 
++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000001; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000007f7f7f; ++ __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf589caff5605f2fa; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6adeb5dfcb000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c +new file mode 100644 +index 000000000..613668143 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c +@@ -0,0 +1,668 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, 
int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001ffff00000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x4f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004e005500060031; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff870068fff5ffb3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x004e005500060031; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff870068fff5ffb3; ++ *((unsigned long *)&__m128i_result[1]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x04e00060ffffffff; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808000008080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080000080800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001010100010100; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x2f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000080007f80800; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00047fff00007fff; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000003fc0003; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x342caf9bffff1fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040000000400; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c037fff342c7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000fff8fff8; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff80000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x37); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff100fffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff100fffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff100fffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x4b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a000a000a000a00; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf2f2e5e5e5e5e5dc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 
0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000003fc0; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x22); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x35); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlni_hu_w 
(__m128i_op0, __m128i_op1, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000083b00000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x33); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x7e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1ff85ffe2ae5d973; ++ *((unsigned long *)&__m128i_op1[1]) = 0x403be000ffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000ffc2f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00201df000000000; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000005151515; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000006302e00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000003f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f417f417f027e03; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001fd0; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x32); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffbfffffffbf; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffff7f; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x5f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000202fe02; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x11); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312; ++ *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x480f7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005dcbe7e830c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000001fffff59; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x63); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007f41; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x39); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x685670d27e00682a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x14ccc6320076a4d2; ++ *((unsigned long *)&__m128i_op1[0]) = 0x685670d27e00682a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc000000fc0003fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbffffff0ffffc00f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000003f0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffc3ffff003e; ++ *((unsigned long *)&__m128i_result[1]) = 0x00c0000000bfffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x800000810000807f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x808080010080007f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x800000810000807f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x808080010080007f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000020000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000020000020; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x62); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x0400400204004002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000002002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00a8009800880078; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000807f00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x80006b0080808080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff7fff; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000001010101; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000001fe01; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000001fe01; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f0f0f0f00000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x555500adfffc5cab; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010100000100; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03ff0101fc010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03fffffffc010102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000300037ff000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000007070707; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x45); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffcfffdfffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffcfffdfffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000053a4f452; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000053a; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000b3a6000067da; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00004e420000c26a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x7a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x38); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7c7c000000007176; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x3e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000c6c7; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8d8d8d8d8d8cc6c6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x3c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a8228222; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03aa558ec8546eb6; ++ *((unsigned long *)&__m128i_op1[1]) = 0x001a64b345308091; ++ *((unsigned long *)&__m128i_op1[0]) = 0x001f2f2cab1c732a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0155ffff754affff; ++ *((unsigned long *)&__m128i_result[0]) = 0x034cffff03e5ffff; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc1bdceee242070dc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xe907b754d7eaa478; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x5); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0002711350a27112; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00d5701794027113; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000203000010d0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffc00300000220; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000000900; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000090900000998; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x20); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000001000010f8; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f0f0f0f00000f00; ++ __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c +new file mode 100644 +index 000000000..ec688bb12 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c +@@ -0,0 +1,470 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) =
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff7fff; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00040003ff83ff84; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00040003ff4dffca; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000002020202; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbe6ed563; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff732a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fbf9; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000000; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000004fc04f81; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000004fc04f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f7f; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc1000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffc1000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff000000007fff; ++ __m128i_out = 
__lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffff0000000ad3d; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff000fffff000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf001f0010101f002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80df00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f7f; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff3fbfffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fbf3fbf00007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f7f7f01027f02; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3f413f4100000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f801fe000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000100000000fc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000040a04000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00123fff00120012; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0012001200120012; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00003fff00010000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x1200091212121212; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0800010001ff8000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x2e9028362e902836; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2e9028362e902836; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010; ++ __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x33f5c2d7d975d7fe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002711350a27112; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00d5701794027113; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0674c886fcba4e98; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfdce8003090b0906; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff001a00000000; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010; ++ __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001fffe00014b41; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001ffde; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0002000100020002; ++ __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c +new file mode 100644 +index 000000000..02f7ca08b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c +@@ -0,0 +1,597 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include <lsxintrin.h> ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x3d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x8080808000008080; ++ *((unsigned
long *)&__m128i_result[0]) = 0x8080000080800000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007f00; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0101010400100203; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0103010301020109; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000110000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000007f00000004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0202000402020202; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000200000010000; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x56); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; 
++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x6d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0001ffff8002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0010000400020004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff20ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffc0020ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x07fff80000008000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000007ffe001; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x7c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x03574e3b94f2ca31; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000001f807b89; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000005050000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0505000005050505; ++ *((unsigned long *)&__m128i_result[1]) = 0x000d02540000007e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400140014; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x41); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x3b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x56a09e662ab46b31; ++ *((unsigned long *)&__m128i_op1[0]) = 0xb4b8122ef4054bb3; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x02b504f305a5c091; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d 
(__m128i_op0, __m128i_op1, 0x37); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000d000d000d000d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000d000d000d000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000680000006800; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000400; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005555aaabfffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffffff000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000000ab; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x43); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000080; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x18); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x34); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000004f804f81; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000004f804f80; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001400000014; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff81007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffb7005f0070007c; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff80007e028401; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9a10144000400000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000001ffff00010; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x5b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x29); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffff9cff05; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff9cfebd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff7ffffef77fffdd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf77edf9cffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001fffff001fffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d1c1b1a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1918171615141312; ++ *((unsigned long *)&__m128i_result[1]) = 0x10ff10ff10ff10ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, 
__m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffa6ff91fdd8ef77; ++ *((unsigned long *)&__m128i_op0[0]) = 0x061202bffb141c38; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x010101fe0101fe87; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000004000000002; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffd60001723aa5f8; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007f007f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x808080e280808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080636380806363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x808080e280808080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080636380806363; ++ *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004; ++ *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1d); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000d0000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000dffff000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000070007; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000007ffff; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800c00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff0100ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0607060700000807; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0707f8f803e8157e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x31); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x21); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xc); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc0808000c0808000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000003020302; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x16); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x3a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc0800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000008080600; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op0[0]) = 0x52525252adadadad; ++ *((unsigned long *)&__m128i_op1[1]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x800000007fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x003ef89df07f0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003ec0fc0fbfe001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff800ff2fe6c00d; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff40408ece0e0de; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0xa); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000400040004000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x12); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000ff960001005b; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000ffa500010003; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[0]) = 0x0020000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x2b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff; ++ __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0010001000000010; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010; ++ __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000001f0000001f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x4000000040000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x27); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x28); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x26); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x117d7f7b093d187f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe1bfefe00011ee1; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe1bfe6c03824c60; ++ *((unsigned long *)&__m128i_result[1]) = 0x7f7f7f7f0000001a; ++ *((unsigned long *)&__m128i_result[0]) = 0x7f7f017f7f7f7f7f; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0xffff3a81ffff89fd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffb3c3ffff51ba; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0802080408060803; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff00ffffff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000900ffff98; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff; ++ __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xc); ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000056000056; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3a8000003a800000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffefff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xa03aa03ae3e2e3e2; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x75); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000760151; ++ *((unsigned long *)&__m128i_op0[0]) = 0x003e0021009a009a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000003e2427c2ee; ++ *((unsigned long *)&__m128i_result[1]) = 0x00001e5410082727; ++ *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00107f7f; ++ __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000f1384; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000000004ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f8000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c +new file mode 100644 +index 000000000..fc4cbb4e5 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c +@@ -0,0 +1,398 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, 
__m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001801f0307f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001801f0307f80; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_result[1]) = 0x0101010108082626; ++ *((unsigned long *)&__m128i_result[0]) = 0x01010101ffff7878; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00fe000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x027e0000000000ff; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffb4ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffff98dea; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xc00fffffffffb4ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xbf0c05fffff98dea; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00; ++ *((unsigned long *)&__m128i_result[1]) = 0x010101010101012f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0101010101010129; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfefefefefefefefe; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x1202120212021202; ++ *((unsigned long *)&__m128i_result[0]) = 0x1202120212021202; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0fffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41957fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[0]) = 0xbf6b810181018101; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff8ffa2fffdffb0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0108015e01030150; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000017f0000; ++ __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007fffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xf436f3f52f4ef4a8; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf4b6f3f52f4ef4a8; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000101fd01fe; 
++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc0ffc0ffc0ffc0; ++ *((unsigned long *)&__m128i_result[0]) = 0xffc0ffc0ffc0ffc0; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fc0010181020103; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fc0ffff81020103; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001e03; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000011e04; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ 
*((unsigned long *)&__m128i_op0[1]) = 0x6363636363abdf16; ++ *((unsigned long *)&__m128i_op0[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee; ++ *((unsigned long *)&__m128i_result[1]) = 0x636363633f3e47c1; ++ *((unsigned long *)&__m128i_result[0]) = 0x41f8e080f1ef4eaa; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001fffe00014b41; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001ffde; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0002ffffb4bf; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0002ffff0022; ++ __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001fc0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000002010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000001fbdff0; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000001d5d4; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000150d707009; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000fffe2a2c; ++ *((unsigned long *)&__m128i_result[0]) = 0x03f1e3bd80000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffd5002affffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x343d8dc6b0ed5a08; ++ *((unsigned long *)&__m128i_result[1]) = 0x002affd600000001; ++ *((unsigned long *)&__m128i_result[0]) = 0xcbc2723a4f12a5f8; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffdfffffffe0; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffdfffffffe0; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000c2f90000bafa; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000c2fa8000c2fa; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff3d06ffff4506; ++ *((unsigned long *)&__m128i_result[0]) = 0x7ffffffe7ffff800; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffffff3fffffff3; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ 
*((unsigned long *)&__m128i_result[1]) = 0xfffffff3fffffff4; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff3fffffff4; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ef8000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x8108000000000000; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000063b2ac27; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffaa076aeb; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff63b3584e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000fffdaa07d5d6; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff81; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff7c; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff7cffd6ffc700b0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x008300290038ff50; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4d1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff3f213b2f; ++ __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c +new file mode 100644 +index 000000000..0d5987567 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c +@@ -0,0 +1,408 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w 
-fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7f801fa06451ef11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x68bcf93435ed25ed; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffb64c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000003900; ++ *((unsigned long *)&__m128i_result[0]) = 0x68bcf93435ed25ed; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x460f3b393ef4be3a; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x04e00060ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x04e00060ffffffff; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001c; ++ *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x004200a000200000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be5579ebe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f909; ++ *((unsigned long *)&__m128i_result[1]) = 0x0c03e17edd781b11; ++ *((unsigned long *)&__m128i_result[0]) = 0x342caf9be55700b5; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, 
__m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20; ++ *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4e3e13368c17f6e6; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111311111114111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111311111112111; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111311111114111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111311111110000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0202fe02fd020102; ++ *((unsigned long *)&__m128i_result[1]) = 0xfefcfefcfefcfefc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfcfc00fc01fcfdfc; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00004000ffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc5c53492f25acbf2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73; ++ *((unsigned long *)&__m128i_result[0]) = 0xc5c534920000c4ed; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000200; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff0000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xc14eef7fc14ea000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000ea000010fa101; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xb); ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000006ffef000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffc2ffe700000007; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ffc100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x41dfffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xbde2ffe800000007; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xa000308000008002; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0500847b00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x006f0efe258ca851; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff90ffffe0f5; ++ *((unsigned long *)&__m128i_result[0]) = 0x006e7973258d0ef4; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ca02f854; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000d0000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363635663636356; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ 
++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c +new file mode 100644 +index 000000000..8afdffa50 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c +@@ -0,0 +1,70 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x0; ++ __lsx_vst (__m128i_op0, (unsigned long *)&__m128i_result, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_op0, __m128i_result); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x0; ++ __lsx_vstx (__m128i_op0, (unsigned long *)&__m128i_result, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_op0, __m128i_result); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x05; ++ *((unsigned long *)&__m128i_out[1]) = 0x0; ++ *((unsigned long *)&__m128i_out[0]) = 0x0; ++ __lsx_vstelm_b (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x5c05; ++ *((unsigned long *)&__m128i_out[1]) = 0x0; ++ *((unsigned long *)&__m128i_out[0]) = 0x0; ++ __lsx_vstelm_h (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0xc9d85c05; ++ *((unsigned long *)&__m128i_out[1]) = 0x0; ++ *((unsigned long *)&__m128i_out[0]) = 0x0; ++ __lsx_vstelm_w (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0; ++ *((unsigned long *)&__m128i_result[0]) = 0x1dcc4255c9d85c05; ++ *((unsigned long *)&__m128i_out[1]) = 0x0; ++ *((unsigned long *)&__m128i_out[0]) = 0x0; ++ __lsx_vstelm_d (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c +new file mode 100644 +index 000000000..f5c82bc74 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c +@@ -0,0 +1,381 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000000001fe; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xc6ffe000c6fde000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808081; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffe000ffffe000; ++ *((unsigned long *)&__m128i_result[0]) = 0x467f6080467d607f; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00ff00fe00fe00ff; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x7fff00007fff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff0000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4d1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000040223c2e; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfd200ed2fd370775; ++ *((unsigned long *)&__m128i_op0[0]) = 0x96198318780e32c5; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffe65ecc1be5bc; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe65ecc1be5bc; ++ *((unsigned long *)&__m128i_result[1]) = 0xfe212874311c22b9; ++ *((unsigned long *)&__m128i_result[0]) = 0x971a9dbaacf34d09; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x0); ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x4f4f4f4f4f4f4f4f; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f4f4f4f4f4f4f4f; ++ __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf000e001bf84df83; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff8e001ff84e703; ++ *((unsigned long *)&__m128i_result[1]) = 0x14042382c3ffa481; ++ *((unsigned long *)&__m128i_result[0]) = 0x040c238283ff9d01; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0141010101410101; ++ *((unsigned long *)&__m128i_result[1]) = 0xfebffefffebffeff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfebffefffebffeff; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111; ++ *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000700000004fdff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000300000000fdff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff7fffefffa01ff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffbfffefffe01ff; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000cd630000cd63; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000329d0000329d; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x08080807f7f7f7f8; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100; ++ *((unsigned long *)&__m128i_result[1]) = 0x08080805f5f5f5f8; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00; ++ 
__m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00060eb000000006; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000075c00000cf0; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffaf1500000fffa; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000f8a40000f310; ++ __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff100fffc; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffdf100fffc; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000007f7f7f7f; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000010; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned 
long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00001802041b0014; ++ __m128i_out = __lsx_vsub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000f7d1000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x773324887fffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff082efffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x88cbdb7780000001; ++ __m128i_out = __lsx_vsub_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001f50000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffe0b0000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000fffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0010000000000001; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000015; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001fefc; ++ *((unsigned long *)&__m128i_result[1]) = 0x0006000100040001; ++ *((unsigned long *)&__m128i_result[0]) = 0x00010002ffff0105; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000003fffffffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000003fffffffe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000003fffffffd; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363abdf16; ++ *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198; ++ *((unsigned long *)&__m128i_result[1]) = 0x9c9d9b9bbfaa20e9; ++ *((unsigned long *)&__m128i_result[0]) = 0xbe081c963e6fee68; ++ __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c +new file mode 100644 +index 000000000..37e0ccf4d +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c +@@ -0,0 +1,329 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18; ++ *((unsigned long *)&__m128i_result[1]) = 0xe0d56a9774f3ea31; ++ *((unsigned long *)&__m128i_result[0]) = 0xe0dd268932a5edf9; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff88; ++ *((unsigned long *)&__m128i_result[1]) = 0xe5e5e5e5e5e5e5e5; ++ *((unsigned long *)&__m128i_result[0]) = 0xe5e5e5e5e4e4e46d; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408; ++ *((unsigned long *)&__m128i_result[1]) = 0xf7f7f7ff8e8c6d7e; ++ *((unsigned long *)&__m128i_result[0]) = 0xf7f7f7f7f7f7fbff; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1); ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xe6e6e6e6e6e6e6e6; ++ *((unsigned long *)&__m128i_result[0]) = 0xe6e6e6e6e6e6e6e6; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf8f8f8f8f8f8f8f8; ++ *((unsigned long *)&__m128i_result[0]) = 0xf8f8f8f8f8f8f8f8; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x8); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xe9e9e9e9e9e9e9e9; ++ *((unsigned long *)&__m128i_result[0]) = 0x171d423524e9e9e9; ++ __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x17); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe2ffe2ffe2ffe2; ++ *((unsigned long *)&__m128i_result[0]) = 0xffe2ffe2ffe2ffe2; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9795698585057dec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x87f82867431a1d08; ++ *((unsigned long *)&__m128i_result[1]) = 0x9780697084f07dd7; ++ *((unsigned 
long *)&__m128i_result[0]) = 0x87e3285243051cf3; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffcfffc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffc00fd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x4); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x371fe00000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_result[0]) = 0x370bdfecffecffec; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x14); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000040600000406; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020202020202fe02; ++ *((unsigned long *)&__m128i_result[1]) = 0xfff503fbfff503fb; ++ *((unsigned long *)&__m128i_result[0]) = 0x01f701f701f7fdf7; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x2); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x803e0000803e0000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x803e0000803e0000; ++ *((unsigned long *)&__m128i_result[1]) = 0x803bfffd803bfffd; ++ *((unsigned long *)&__m128i_result[0]) = 0x803bfffd803bfffd; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffedffedffedffed; ++ *((unsigned long *)&__m128i_result[0]) = 0xffedffedffedffed; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x13); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffe4ffe4ffe4ffe4; ++ *((unsigned long *)&__m128i_result[0]) = 0xffe4ffe4ffe4ffe4; ++ __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffef; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x11); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffe6ffffffe6; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffe6ffffffe6; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x19); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff1fffffff1; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff6fffffff6; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff6fffffff6; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffe4ffffffe4; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffe4ffffffe4; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1c); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffe1ffffffe1; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffff1fffffff1; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffff1fffffff1; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffab7e71e33848; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffe1ffffffe1; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffab5f71e33829; ++ __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a; ++ *((unsigned long *)&__m128i_result[1]) = 0xa8beed87bc3f2bd3; ++ *((unsigned long *)&__m128i_result[0]) = 0x0024d8f6a494005c; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xe); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffeb; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe1; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe1; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1f); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe5; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe5; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xf2f2e5e5e5e5e5e5; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xf2f2e5e5e5e5e5dc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x9); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fffff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x3ffffeffffffffe5; ++ *((unsigned long *)&__m128i_result[0]) = 0x3ffffeffffffffe5; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1b); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000070; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff5; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xb); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff0; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x10); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe6; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe6; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1a); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100010000fffb; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010000fffb; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffeb; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffeb; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x15); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x6); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffe80008000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe2; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffdfffe80007fe2; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1e); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x001a001a001a001a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x001a001a001a001a; ++ *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a000b; ++ *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a000b; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xf); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000000234545b; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4d1; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000002345454; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000c0dec4ca; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x7); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0f8d33000f8d3300; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0003b80000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0f8d33000f8d32fd; ++ *((unsigned long *)&__m128i_result[0]) = 0x0003b7fffffffffd; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x3); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x0); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c +new file mode 100644 +index 000000000..f0d391a09 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c +@@ -0,0 +1,326 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x43d3e0000013e000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffd3000000130000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffd3000000130000; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x000100010000ffda; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000200000016; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffbfbfbfc0; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbfbfbfc0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040; ++ *((unsigned long *)&__m128i_result[1]) = 0xffbfffbfff7fff80; ++ *((unsigned long *)&__m128i_result[0]) = 0xffbfffbfff7fff80; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000808000020200; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ff8000020000; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x413e276583869d79; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7f7f017f9d8726d3; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7c7cd2eb63637c52; ++ *((unsigned long *)&__m128i_op1[0]) = 0x82ffd2210127add2; ++ *((unsigned long *)&__m128i_result[1]) = 0xffc2007aff230027; ++ *((unsigned long *)&__m128i_result[0]) = 0x0080005eff600001; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[1]) = 0xffffffffffffffee; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010012; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe1ffc0; ++ __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000004000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffc000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ffff00; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000100c6ffef10c; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffff01; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffeff400000df4; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000320; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000007730; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 
0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000103; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x33eac9fdca42f660; ++ *((unsigned long *)&__m128i_op0[0]) = 0xaa472d26fe867091; ++ *((unsigned long *)&__m128i_op1[1]) = 0x33eac9fdca42f660; ++ *((unsigned long *)&__m128i_op1[0]) = 0xaa472d26fe867091; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ 
*((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff0000857a; ++ *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff7a86; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffe01fff2; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xf654ad7447e59090; ++ *((unsigned long *)&__m128i_op1[0]) = 0x27b1b106b8145f50; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffb81a6f70; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000047eba0b0; ++ __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000c01020d8009; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000003004; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000c01020d5005; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffff01ff01; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000d; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80; ++ *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80; ++ __m128i_out = __lsx_vsubwev_q_d 
(__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18; ++ *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb9fe00003640; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffe4eb00001b18; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x80001b155b4b0000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000008; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100080000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefff80000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004; ++ *((unsigned long *)&__m128i_op1[1]) = 0x3fc03fc000000003; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7f7f1fd800000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xc0411fe800000000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0e440; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffffffefffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01e420fff0e442; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c +new file mode 100644 +index 000000000..3b18bc13c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c +@@ -0,0 +1,417 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; 
++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00000083; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004; ++ *((unsigned long *)&__m128i_result[1]) = 0xff01ff010000ff7d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000fffc; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff00fc0000ff02; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xff01ff040000fffe; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac; ++ *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x21011f3f193d173b; ++ *((unsigned long *)&__m128i_op1[0]) = 0xff39ff37ff35ff33; ++ *((unsigned long *)&__m128i_result[1]) = 0x00fe008e009e0071; ++ *((unsigned long *)&__m128i_result[0]) = 0x001c006f00c4008d; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x9c9ca19d509ae734; ++ *((unsigned long *)&__m128i_op0[0]) = 0xd1b09480f2123460; ++ *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000001fffeff98; ++ *((unsigned long *)&__m128i_result[0]) = 0x0014ffe4ff76ffc4; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92; ++ *((unsigned long *)&__m128i_op1[0]) = 0xee297a731e5c5f86; ++ *((unsigned long *)&__m128i_result[1]) = 0xff6cffb5ff98ff6e; ++ *((unsigned long *)&__m128i_result[0]) = 0xffd7ff8dffa4ff7a; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3ea5016b; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffefffe3f6fb04d; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000d96f; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffd83b; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000f0009d3c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000016fff9d3d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000bd0; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000000007f0; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000916c; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000010000954d; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000100010000fe01; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000050000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000500000005; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffbffffff85; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffc0000fdfc; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000032; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000032; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xa5c4c774856ba837; ++ *((unsigned long *)&__m128i_op1[0]) = 0x2a569f8081c3bbe9; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff6080ffff4417; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 
0x0000000063b2ac27; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffaa076aeb; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff53d9; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff9515; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf; ++ *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffac5cffffac5c; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffac5cffffac5c; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffaefffbffaefffb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffaefffbffaefffb; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff0005ffff0005; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff000500000004; ++ __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000a1630000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000a1630000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001fd0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001fd0; ++ 
*((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xff7ffffef77fffdd; ++ *((unsigned long *)&__m128i_op1[0]) = 0xf77edf9cffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000008800022; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000001; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffda6f; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffe3d7; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffda6e; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffe3d6; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000807f00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x80006b0080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff00011cf0c569; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc0000002b0995850; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffe30f3a97; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffcfe72830; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff9f5c25; ++ *((unsigned long *)&__m128i_op0[0]) = 0x58fa6b4000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000000ff9f5c25; ++ *((unsigned long *)&__m128i_op1[0]) = 0x58fa6b4000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xcda585aebbb2836a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000080808080; ++ *((unsigned long 
*)&__m128i_result[0]) = 0xffffffffc4cdfd16; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2); ++ *((unsigned long *)&__m128i_op0[1]) = 0x801dd5cb0004e058; ++ *((unsigned long *)&__m128i_op0[0]) = 0x77eb15638eeb5fc2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000004e03d; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000008eeb5fc2; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000100c6ffef00d; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000c00000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000bfffffffe0f6; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffdfffcfffd; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 
0xfffcfffdfffcfffd; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffff7e00000081; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0a0a0a0a0a0a0a0a; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0a0aa9890a0ac5f3; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x36fbdfdcffdcffdc; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffeffff; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000a752a55; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0a753500a9fa0d06; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xf589caff5605f2fa; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x087c000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000000000087c; ++ *((unsigned long *)&__m128i_op1[1]) = 0x10f8000100000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000001000010f8; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffff784; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ 
ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000000000000; ++ __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c +new file mode 100644 +index 000000000..39ebff154 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c +@@ -0,0 +1,326 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc485edbcc0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x003f000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007c000d00400000; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x841f000fc28f801f; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007c0000003e0080; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long 
*)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000007fff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000001001; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffff8000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff8000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffefe; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffc2ba; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op0[0]) = 
0x0000027f000000fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000018000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f0a; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff7a53; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000ff0000ff86; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffa6ff91fdd8ef77; ++ *((unsigned long *)&__m128i_op1[0]) = 0x061202bffb141c38; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000005a00000228; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffff9ee000004ec; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x000000001fe02000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000001fe02000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03; ++ *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000002345454; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4ca; ++ *((unsigned long *)&__m128i_result[1]) = 0x000030ebffffffdc; ++ *((unsigned long *)&__m128i_result[0]) = 0x00000203ffffff25; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x380fdfdfc0000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffc7f100004000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned 
long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00005dcbe7e830c0; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7; ++ *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000005dcb; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00f0008100800080; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00f000807000009e; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000ec382e; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000ec382d; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcfcfcfcfcfd; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcfcfcfc0000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00009c7c00007176; ++ *((unsigned long *)&__m128i_result[1]) = 0xfffffffffcfcfcfc; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffffffffcfc6080; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3; ++ *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3; ++ *((unsigned long *)&__m128i_op1[1]) = 0xffaefffbffaefffb; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffaefffbffaefffb; ++ *((unsigned long *)&__m128i_result[1]) = 
0xffffffffc105d1aa; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffbc19ecca; ++ __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000101fd01fe; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfffff0000000ad3d; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffff000fffff000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xefffdffff0009d3d; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ff0000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff01; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000100010001007c; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x5f675e96e29a5a60; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x5e695e95e1cb5a01; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ 
__m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c +new file mode 100644 +index 000000000..62837f1ac +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c +@@ -0,0 +1,308 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int ++main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe07e5fefefdddfe; ++ *((unsigned long *)&__m128i_op1[0]) = 0x00020100fedd0c00; ++ *((unsigned long *)&__m128i_result[1]) = 0xff02ff1bff02ff23; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000ffffff02fff4; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfffefff6fff80002; ++ *((unsigned long *)&__m128i_op1[1]) = 0x82c53a0000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc72ef153fc02fdf7; ++ *((unsigned long *)&__m128i_result[1]) = 0x007d00c500ff00ff; ++ *((unsigned long *)&__m128i_result[0]) = 0x0038000e0003ff03; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 
(__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x007f000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x007f000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000040000000400; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000010; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff800000000000; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 
0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffff800; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002; ++ *((unsigned long *)&__m128i_op1[1]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_op1[0]) = 0xfe813f00fe813f00; ++ *((unsigned long *)&__m128i_result[1]) = 0xffff017fffff017f; ++ *((unsigned long *)&__m128i_result[0]) = 0xffff017fffff017f; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000; ++ __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000008; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000009; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408; ++ 
*((unsigned long *)&__m128i_op1[1]) = 0x7fff0007e215b122; ++ *((unsigned long *)&__m128i_op1[0]) = 0x7ffeffff7bfff828; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffff80010001; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffff80010001; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x00000af555555555; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00000af555555555; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000af5; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000af5; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x000000002e34594c; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280001; ++ *((unsigned long *)&__m128i_op0[0]) = 0x42a0000042a02001; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long 
*)&__m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000036280001; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xd0b1ffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0x9d519ee8d2d84f1d; ++ *((unsigned long *)&__m128i_op1[1]) = 0x8644ffff0000ffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000fffe; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x4a6d0000ffff0000; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff; ++ *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x7d3ac60000000000; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op0[0]) = 0xfeffffffffffffff; ++ *((unsigned long *)&__m128i_op1[1]) = 0x00000fffffffe000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0000102020204000; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0xfefff00000001fff; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0003000300000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003; ++ *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long *)&__m128i_result[0]) = 0xfffcfffd00000000; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363; ++ *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000; ++ *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001; ++ *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long *)&__m128i_result[0]) = 0x6363636163636363; ++ __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1); ++ ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c +new file mode 100644 +index 000000000..72fa97174 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c +@@ -0,0 +1,79 
@@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x00000000f4012ceb; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x00000000f4012ceb; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000001; ++ *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068; ++ *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[0]) = 0x0000000080000068; ++ __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9; ++ *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515; ++ *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff; ++ *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9514; ++ *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ac26; ++ *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000001; ++ __m128i_out = 
__lsx_vxor_v(__m128i_op0,__m128i_op1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c +new file mode 100644 +index 000000000..cc823d4ba +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c +@@ -0,0 +1,67 @@ ++/* { dg-do run } */ ++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */ ++#include "../simd_correctness_check.h" ++#include ++ ++int main () ++{ ++ __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; ++ __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; ++ __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; ++ ++ int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail; ++ long int long_op0, long_op1, long_op2, lont_out, lont_result; ++ long int long_int_out, long_int_result; ++ unsigned int unsigned_int_out, unsigned_int_result; ++ unsigned long int unsigned_long_int_out, unsigned_long_int_result; ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404; ++ *((unsigned long*)& __m128i_result[0]) = 0x0404040404040404; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x4); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100; ++ *((unsigned long*)& __m128i_result[1]) = 0x5a5a5a5a5b5a5b5a; ++ *((unsigned long*)& __m128i_result[0]) = 0x5a5a5a5a5b5a5b5a; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x5a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0xe3e3e3e3e3e3e3e3; ++ *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0xe3); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020; ++ *((unsigned long*)& __m128i_result[1]) = 0x9a9a9a9a9a9a9a9a; ++ *((unsigned long*)& __m128i_result[0]) = 0x9aba9aba9aba9aba; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x9a); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000; ++ *((unsigned long*)& __m128i_result[1]) = 0x9090909090909090; ++ *((unsigned long*)& __m128i_result[0]) = 0x9090909090909090; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0x90); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ *((unsigned long*)& __m128i_op0[1]) = 0x00000000b81c8382; ++ *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450; ++ *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273; ++ *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1865e65a1; ++ __m128i_out = __lsx_vxori_b(__m128i_op0,0xf1); ++ ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out); ++ ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h +new file mode 100644 +index 000000000..eb7fbd59c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h 
+@@ -0,0 +1,54 @@ ++#include ++#include ++#include ++ ++#define ASSERTEQ_64(line, ref, res) \ ++ do \ ++ { \ ++ int fail = 0; \ ++ for (size_t i = 0; i < sizeof (res) / sizeof (res[0]); ++i) \ ++ { \ ++ long *temp_ref = &ref[i], *temp_res = &res[i]; \ ++ if (abs (*temp_ref - *temp_res) > 0) \ ++ { \ ++ printf (" error: %s at line %ld , expected " #ref \ ++ "[%ld]:0x%lx, got: 0x%lx\n", \ ++ __FILE__, line, i, *temp_ref, *temp_res); \ ++ fail = 1; \ ++ } \ ++ } \ ++ if (fail == 1) \ ++ abort (); \ ++ } \ ++ while (0) ++ ++#define ASSERTEQ_32(line, ref, res) \ ++ do \ ++ { \ ++ int fail = 0; \ ++ for (size_t i = 0; i < sizeof (res) / sizeof (res[0]); ++i) \ ++ { \ ++ int *temp_ref = &ref[i], *temp_res = &res[i]; \ ++ if (abs (*temp_ref - *temp_res) > 0) \ ++ { \ ++ printf (" error: %s at line %ld , expected " #ref \ ++ "[%ld]:0x%x, got: 0x%x\n", \ ++ __FILE__, line, i, *temp_ref, *temp_res); \ ++ fail = 1; \ ++ } \ ++ } \ ++ if (fail == 1) \ ++ abort (); \ ++ } \ ++ while (0) ++ ++#define ASSERTEQ_int(line, ref, res) \ ++ do \ ++ { \ ++ if (ref != res) \ ++ { \ ++ printf (" error: %s at line %ld , expected %d, got %d\n", __FILE__, \ ++ line, ref, res); \ ++ } \ ++ } \ ++ while (0) +-- +2.43.5 + diff --git a/LoongArch-Define-macro-CLEAR_INSN_CACHE.patch b/LoongArch-Define-macro-CLEAR_INSN_CACHE.patch new file mode 100644 index 0000000..6aa82bc --- /dev/null +++ b/LoongArch-Define-macro-CLEAR_INSN_CACHE.patch @@ -0,0 +1,36 @@ +From 809f937d2199c8e96bde42b2fa83586f5b96a466 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Mon, 23 Oct 2023 09:43:05 +0800 +Subject: [PATCH 19/30] LoongArch: Define macro CLEAR_INSN_CACHE. + +LoongArch's microstructure ensures cache consistency by hardware. +Due to out-of-order execution, "ibar" is required to ensure the visibility of the +store (invalidated icache) executed by this CPU before "ibar" (to the instance). +"ibar" will not invalidate the icache, so the start and end parameters are not Affect +"ibar" performance. + +gcc/ChangeLog: + + * config/loongarch/loongarch.h (CLEAR_INSN_CACHE): New definition. + +Change-Id: Ie1620b10156d6a4d84280f781c03870dafdff08f +--- + gcc/config/loongarch/loongarch.h | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index 2f3117fd0..bf41ba223 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1316,3 +1316,8 @@ struct GTY (()) machine_function + (TARGET_HARD_FLOAT ? (TARGET_DOUBLE_FLOAT ? 8 : 4) : 0) + + #define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN) ++ ++/* LoongArch maintains ICache/DCache coherency by hardware, ++ we just need "ibar" to avoid instruction hazard here. */ ++#undef CLEAR_INSN_CACHE ++#define CLEAR_INSN_CACHE(beg, end) __builtin_loongarch_ibar (0) +-- +2.43.5 + diff --git a/LoongArch-Fix-insn-output-of-vec_concat-templates-fo.patch b/LoongArch-Fix-insn-output-of-vec_concat-templates-fo.patch new file mode 100644 index 0000000..307eb62 --- /dev/null +++ b/LoongArch-Fix-insn-output-of-vec_concat-templates-fo.patch @@ -0,0 +1,135 @@ +From 9ae5f22b71afa0c68d9805da526b6b906c5ddb3c Mon Sep 17 00:00:00 2001 +From: Chenghui Pan +Date: Fri, 22 Dec 2023 11:36:46 +0800 +Subject: [PATCH 22/30] LoongArch: Fix insn output of vec_concat templates for + LASX. 
+ +When investigaing failure of gcc.dg/vect/slp-reduc-sad.c, following +instruction block are being generated by vec_concatv32qi (which is +generated and matched in vec_initv32qiv16qi) at entrance of foo() +function: + + vldx $vr3,$r5,$r6 + vld $vr2,$r5,0 + xvpermi.q $xr2,$xr3,0x20 + +causes the reversion of vec_initv32qiv16qi operation's high and +low 128-bit part. + +According to other target's similar impl and LSX impl for following +RTL representation, current definition in lasx.md of "vec_concat" +are wrong: + + (set (op0) (vec_concat (op1) (op2))) + +For correct behavior, the last argument of xvpermi.q should be 0x02 +instead of 0x20. This patch fixes this issue and cleanup the vec_concat +template impl. + +gcc/ChangeLog: + + * config/loongarch/lasx.md (vec_concatv4di): Delete. + (vec_concatv8si): Delete. + (vec_concatv16hi): Delete. + (vec_concatv32qi): Delete. + (vec_concatv4df): Delete. + (vec_concatv8sf): Delete. + (vec_concat): New template with insn output fixed. + +Change-Id: I4e3aa2190549c1738cb4c1ac2ea2d702fec68f63 +--- + gcc/config/loongarch/lasx.md | 74 ++++-------------------------------- + 1 file changed, 7 insertions(+), 67 deletions(-) + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index ad2844f2d..1b5fba6e2 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -640,77 +640,17 @@ + [(set_attr "type" "simd_insert") + (set_attr "mode" "")]) + +-(define_insn "vec_concatv4di" +- [(set (match_operand:V4DI 0 "register_operand" "=f") +- (vec_concat:V4DI +- (match_operand:V2DI 1 "register_operand" "0") +- (match_operand:V2DI 2 "register_operand" "f")))] +- "ISA_HAS_LASX" +-{ +- return "xvpermi.q\t%u0,%u2,0x20"; +-} +- [(set_attr "type" "simd_splat") +- (set_attr "mode" "V4DI")]) +- +-(define_insn "vec_concatv8si" +- [(set (match_operand:V8SI 0 "register_operand" "=f") +- (vec_concat:V8SI +- (match_operand:V4SI 1 "register_operand" "0") +- (match_operand:V4SI 2 "register_operand" "f")))] +- "ISA_HAS_LASX" +-{ +- return "xvpermi.q\t%u0,%u2,0x20"; +-} +- [(set_attr "type" "simd_splat") +- (set_attr "mode" "V4DI")]) +- +-(define_insn "vec_concatv16hi" +- [(set (match_operand:V16HI 0 "register_operand" "=f") +- (vec_concat:V16HI +- (match_operand:V8HI 1 "register_operand" "0") +- (match_operand:V8HI 2 "register_operand" "f")))] +- "ISA_HAS_LASX" +-{ +- return "xvpermi.q\t%u0,%u2,0x20"; +-} +- [(set_attr "type" "simd_splat") +- (set_attr "mode" "V4DI")]) +- +-(define_insn "vec_concatv32qi" +- [(set (match_operand:V32QI 0 "register_operand" "=f") +- (vec_concat:V32QI +- (match_operand:V16QI 1 "register_operand" "0") +- (match_operand:V16QI 2 "register_operand" "f")))] +- "ISA_HAS_LASX" +-{ +- return "xvpermi.q\t%u0,%u2,0x20"; +-} +- [(set_attr "type" "simd_splat") +- (set_attr "mode" "V4DI")]) +- +-(define_insn "vec_concatv4df" +- [(set (match_operand:V4DF 0 "register_operand" "=f") +- (vec_concat:V4DF +- (match_operand:V2DF 1 "register_operand" "0") +- (match_operand:V2DF 2 "register_operand" "f")))] +- "ISA_HAS_LASX" +-{ +- return "xvpermi.q\t%u0,%u2,0x20"; +-} +- [(set_attr "type" "simd_splat") +- (set_attr "mode" "V4DF")]) +- +-(define_insn "vec_concatv8sf" +- [(set (match_operand:V8SF 0 "register_operand" "=f") +- (vec_concat:V8SF +- (match_operand:V4SF 1 "register_operand" "0") +- (match_operand:V4SF 2 "register_operand" "f")))] ++(define_insn "vec_concat" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (vec_concat:LASX ++ (match_operand: 1 "register_operand" "0") ++ (match_operand: 2 "register_operand" 
"f")))] + "ISA_HAS_LASX" + { +- return "xvpermi.q\t%u0,%u2,0x20"; ++ return "xvpermi.q\t%u0,%u2,0x02"; + } + [(set_attr "type" "simd_splat") +- (set_attr "mode" "V4DI")]) ++ (set_attr "mode" "")]) + + ;; xshuf.w + (define_insn "lasx_xvperm_" +-- +2.43.5 + diff --git a/LoongArch-Implement-option-save-restore.patch b/LoongArch-Implement-option-save-restore.patch new file mode 100644 index 0000000..c3c8d19 --- /dev/null +++ b/LoongArch-Implement-option-save-restore.patch @@ -0,0 +1,317 @@ +From 72504ec9719bcf6b4a9669dfdd491ccddd3228bd Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: Mon, 22 Jan 2024 16:57:55 +0800 +Subject: [PATCH 26/30] LoongArch: Implement option save/restore. + +LTO option streaming and target attributes both require per-function +target configuration, which is achieved via option save/restore. + +We implement TARGET_OPTION_{SAVE,RESTORE} to switch the la_target +context in addition to other automatically maintained option states +(via the "Save" option property in the .opt files). + +Change-Id: I888f61fda0d3e524bc8e935d228e5b8e3a07d3a5 +--- + gcc/config/loongarch/genopts/loongarch.opt.in | 38 +++++++++---------- + gcc/config/loongarch/loongarch-opts.c | 3 ++ + gcc/config/loongarch/loongarch.c | 28 ++++++++++++++ + gcc/config/loongarch/loongarch.opt | 38 +++++++++---------- + 4 files changed, 69 insertions(+), 38 deletions(-) + +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index b8aab4ca8..2a3006e33 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -62,7 +62,7 @@ EnumValue + Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU64@@) Value(ISA_EXT_FPU64) + + m@@OPTSTR_ISA_EXT_FPU@@= +-Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) ++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) Save + -m@@OPTSTR_ISA_EXT_FPU@@=FPU Generate code for the given FPU. + + m@@OPTSTR_ISA_EXT_FPU@@=@@STR_ISA_EXT_FPU0@@ +@@ -94,7 +94,7 @@ EnumValue + Enum(isa_ext_simd) String(@@STR_ISA_EXT_LASX@@) Value(ISA_EXT_SIMD_LASX) + + m@@OPTSTR_ISA_EXT_SIMD@@= +-Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) ++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) Save + -m@@OPTSTR_ISA_EXT_SIMD@@=SIMD Generate code for the given SIMD extension. + + m@@STR_ISA_EXT_LSX@@ +@@ -132,11 +132,11 @@ EnumValue + Enum(cpu_type) String(@@STR_CPU_LA364@@) Value(CPU_LA364) + + m@@OPTSTR_ARCH@@= +-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) Save + -m@@OPTSTR_ARCH@@=PROCESSOR Generate code for the given PROCESSOR ISA. + + m@@OPTSTR_TUNE@@= +-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) Save + -m@@OPTSTR_TUNE@@=PROCESSOR Generate optimized code for PROCESSOR. + + +@@ -171,59 +171,59 @@ Variable + int la_opt_abi_ext = M_OPT_UNSET + + mbranch-cost= +-Target RejectNegative Joined UInteger Var(loongarch_branch_cost) ++Target RejectNegative Joined UInteger Var(loongarch_branch_cost) Save + -mbranch-cost=COST Set the cost of branches to roughly COST instructions. + + mvecarg +-Target Report Var(TARGET_VECARG) Init(1) ++Target Report Var(TARGET_VECARG) Init(1) Save + Target pass vect arg uses vector register. 
+ + mmemvec-cost= +-Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) ++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) Save + mmemvec-cost=COST Set the cost of vector memory access instructions. + + mveclibabi= +-Target RejectNegative Joined Var(loongarch_veclibabi_name) ++Target RejectNegative Joined Var(loongarch_veclibabi_name) Save + Vector library ABI to use. + + mstackrealign +-Target Var(loongarch_stack_realign) Init(1) ++Target Var(loongarch_stack_realign) Init(1) Save + Realign stack in prologue. + + mforce-drap +-Target Var(loongarch_force_drap) Init(0) ++Target Var(loongarch_force_drap) Init(0) Save + Always use Dynamic Realigned Argument Pointer (DRAP) to realign stack. + + mcheck-zero-division +-Target Mask(CHECK_ZERO_DIV) ++Target Mask(CHECK_ZERO_DIV) Save + Trap on integer divide by zero. + + mcond-move-int +-Target Var(TARGET_COND_MOVE_INT) Init(1) ++Target Var(TARGET_COND_MOVE_INT) Init(1) Save + Conditional moves for integral are enabled. + + mcond-move-float +-Target Var(TARGET_COND_MOVE_FLOAT) Init(1) ++Target Var(TARGET_COND_MOVE_FLOAT) Init(1) Save + Conditional moves for float are enabled. + + mmemcpy +-Target Mask(MEMCPY) ++Target Mask(MEMCPY) Save + Prevent optimizing block moves, which is also the default behavior of -Os. + + mstrict-align +-Target Var(TARGET_STRICT_ALIGN) Init(0) ++Target Var(TARGET_STRICT_ALIGN) Init(0) Save + Do not generate unaligned memory accesses. + + mmax-inline-memcpy-size= +-Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) ++Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) Save + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. + + mrecip +-Target Report RejectNegative Var(loongarch_recip) ++Target Report RejectNegative Var(loongarch_recip) Save + Generate reciprocals instead of divss and sqrtss. + + mrecip= +-Target Report RejectNegative Joined Var(loongarch_recip_name) ++Target Report RejectNegative Joined Var(loongarch_recip_name) Save + Control generation of reciprocal estimates. + + ; The code model option names for -mcmodel. +@@ -247,5 +247,5 @@ EnumValue + Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME) + + mcmodel= +-Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) ++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) Save + Specify the code model. +diff --git a/gcc/config/loongarch/loongarch-opts.c b/gcc/config/loongarch/loongarch-opts.c +index cf11f67d1..3352946c4 100644 +--- a/gcc/config/loongarch/loongarch-opts.c ++++ b/gcc/config/loongarch/loongarch-opts.c +@@ -719,6 +719,9 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target, + opts->x_la_opt_cpu_arch = target->cpu_arch; + opts->x_la_opt_cpu_tune = target->cpu_tune; + ++ /* status of -mcmodel */ ++ opts->x_la_opt_cmodel = target->cmodel; ++ + /* status of -mfpu and -msimd */ + opts->x_la_opt_fpu = target->isa.fpu; + opts->x_la_opt_simd = target->isa.simd; +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index 6b4db1c45..957eb1048 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -7254,6 +7254,30 @@ loongarch_option_override (void) + loongarch_option_override_internal (&global_options, &global_options_set); + } + ++/* Implement TARGET_OPTION_SAVE. 
*/ ++ ++static void ++loongarch_option_save (struct cl_target_option *, ++ struct gcc_options *opts) ++{ ++ loongarch_update_gcc_opt_status (&la_target, opts, NULL); ++} ++ ++/* Implement TARGET_OPTION_RESTORE. */ ++ ++static void ++loongarch_option_restore (struct gcc_options *, ++ struct cl_target_option *ptr) ++{ ++ la_target.cpu_arch = ptr->x_la_opt_cpu_arch; ++ la_target.cpu_tune = ptr->x_la_opt_cpu_tune; ++ ++ la_target.cmodel = ptr->x_la_opt_cmodel; ++ ++ la_target.isa.fpu = ptr->x_la_opt_fpu; ++ la_target.isa.simd = ptr->x_la_opt_simd; ++} ++ + /* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */ + + static void +@@ -10846,6 +10870,10 @@ loongarch_asan_shadow_offset (void) + + #undef TARGET_OPTION_OVERRIDE + #define TARGET_OPTION_OVERRIDE loongarch_option_override ++#undef TARGET_OPTION_SAVE ++#define TARGET_OPTION_SAVE loongarch_option_save ++#undef TARGET_OPTION_RESTORE ++#define TARGET_OPTION_RESTORE loongarch_option_restore + + #undef TARGET_LEGITIMIZE_ADDRESS + #define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 3dfe5f3cb..d300413a2 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -69,7 +69,7 @@ EnumValue + Enum(isa_ext_fpu) String(64) Value(ISA_EXT_FPU64) + + mfpu= +-Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) ++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) Save + -mfpu=FPU Generate code for the given FPU. + + mfpu=0 +@@ -101,7 +101,7 @@ EnumValue + Enum(isa_ext_simd) String(lasx) Value(ISA_EXT_SIMD_LASX) + + msimd= +-Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) ++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) Save + -msimd=SIMD Generate code for the given SIMD extension. + + mlsx +@@ -139,11 +139,11 @@ EnumValue + Enum(cpu_type) String(la364) Value(CPU_LA364) + + march= +-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) Save + -march=PROCESSOR Generate code for the given PROCESSOR ISA. + + mtune= +-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) ++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) Save + -mtune=PROCESSOR Generate optimized code for PROCESSOR. + + +@@ -178,59 +178,59 @@ Variable + int la_opt_abi_ext = M_OPT_UNSET + + mbranch-cost= +-Target RejectNegative Joined UInteger Var(loongarch_branch_cost) ++Target RejectNegative Joined UInteger Var(loongarch_branch_cost) Save + -mbranch-cost=COST Set the cost of branches to roughly COST instructions. + + mvecarg +-Target Report Var(TARGET_VECARG) Init(1) ++Target Report Var(TARGET_VECARG) Init(1) Save + Target pass vect arg uses vector register. + + mmemvec-cost= +-Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) ++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) Save + mmemvec-cost=COST Set the cost of vector memory access instructions. + + mveclibabi= +-Target RejectNegative Joined Var(loongarch_veclibabi_name) ++Target RejectNegative Joined Var(loongarch_veclibabi_name) Save + Vector library ABI to use. 
+ + mstackrealign +-Target Var(loongarch_stack_realign) Init(1) ++Target Var(loongarch_stack_realign) Init(1) Save + Realign stack in prologue. + + mforce-drap +-Target Var(loongarch_force_drap) Init(0) ++Target Var(loongarch_force_drap) Init(0) Save + Always use Dynamic Realigned Argument Pointer (DRAP) to realign stack. + + mcheck-zero-division +-Target Mask(CHECK_ZERO_DIV) ++Target Mask(CHECK_ZERO_DIV) Save + Trap on integer divide by zero. + + mcond-move-int +-Target Var(TARGET_COND_MOVE_INT) Init(1) ++Target Var(TARGET_COND_MOVE_INT) Init(1) Save + Conditional moves for integral are enabled. + + mcond-move-float +-Target Var(TARGET_COND_MOVE_FLOAT) Init(1) ++Target Var(TARGET_COND_MOVE_FLOAT) Init(1) Save + Conditional moves for float are enabled. + + mmemcpy +-Target Mask(MEMCPY) ++Target Mask(MEMCPY) Save + Prevent optimizing block moves, which is also the default behavior of -Os. + + mstrict-align +-Target Var(TARGET_STRICT_ALIGN) Init(0) ++Target Var(TARGET_STRICT_ALIGN) Init(0) Save + Do not generate unaligned memory accesses. + + mmax-inline-memcpy-size= +-Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) ++Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) Save + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. + + mrecip +-Target Report RejectNegative Var(loongarch_recip) ++Target Report RejectNegative Var(loongarch_recip) Save + Generate reciprocals instead of divss and sqrtss. + + mrecip= +-Target Report RejectNegative Joined Var(loongarch_recip_name) ++Target Report RejectNegative Joined Var(loongarch_recip_name) Save + Control generation of reciprocal estimates. + + ; The code model option names for -mcmodel. +@@ -254,5 +254,5 @@ EnumValue + Enum(cmodel) String(extreme) Value(CMODEL_EXTREME) + + mcmodel= +-Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) ++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) Save + Specify the code model. 
+-- +2.43.5 + diff --git a/LoongArch-Implement-su-sadv16qi-and-su-sadv32qi-stan.patch b/LoongArch-Implement-su-sadv16qi-and-su-sadv32qi-stan.patch new file mode 100644 index 0000000..51d535c --- /dev/null +++ b/LoongArch-Implement-su-sadv16qi-and-su-sadv32qi-stan.patch @@ -0,0 +1,102 @@ +From e7cc33593dfe39811c41c1bed45b5f1154398163 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Sat, 30 Dec 2023 09:41:59 +0800 +Subject: [PATCH 25/30] LoongArch: Implement sadv16qi and sadv32qi + standard names + +Change-Id: I57e8b9f9efa4d7ddf649251299134f1850439904 +--- + gcc/config/loongarch/lasx.md | 34 ++++++++++++++++++++++++++++++++++ + gcc/config/loongarch/lsx.md | 34 ++++++++++++++++++++++++++++++++++ + 2 files changed, 68 insertions(+) + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index 1b5fba6e2..4b1010d42 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -5210,3 +5210,37 @@ + const0_rtx)); + DONE; + }) ++ ++(define_expand "usadv32qi" ++ [(match_operand:V8SI 0 "register_operand") ++ (match_operand:V32QI 1 "register_operand") ++ (match_operand:V32QI 2 "register_operand") ++ (match_operand:V8SI 3 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx t1 = gen_reg_rtx (V32QImode); ++ rtx t2 = gen_reg_rtx (V16HImode); ++ rtx t3 = gen_reg_rtx (V8SImode); ++ emit_insn (gen_lasx_xvabsd_u_bu (t1, operands[1], operands[2])); ++ emit_insn (gen_lasx_xvhaddw_hu_bu (t2, t1, t1)); ++ emit_insn (gen_lasx_xvhaddw_wu_hu (t3, t2, t2)); ++ emit_insn (gen_addv8si3 (operands[0], t3, operands[3])); ++ DONE; ++}) ++ ++(define_expand "ssadv32qi" ++ [(match_operand:V8SI 0 "register_operand") ++ (match_operand:V32QI 1 "register_operand") ++ (match_operand:V32QI 2 "register_operand") ++ (match_operand:V8SI 3 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx t1 = gen_reg_rtx (V32QImode); ++ rtx t2 = gen_reg_rtx (V16HImode); ++ rtx t3 = gen_reg_rtx (V8SImode); ++ emit_insn (gen_lasx_xvabsd_s_b (t1, operands[1], operands[2])); ++ emit_insn (gen_lasx_xvhaddw_hu_bu (t2, t1, t1)); ++ emit_insn (gen_lasx_xvhaddw_wu_hu (t3, t2, t2)); ++ emit_insn (gen_addv8si3 (operands[0], t3, operands[3])); ++ DONE; ++}) +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index 688c67cc2..e4581b314 100644 +--- a/gcc/config/loongarch/lsx.md ++++ b/gcc/config/loongarch/lsx.md +@@ -3573,6 +3573,40 @@ + DONE; + }) + ++(define_expand "usadv16qi" ++ [(match_operand:V4SI 0 "register_operand") ++ (match_operand:V16QI 1 "register_operand") ++ (match_operand:V16QI 2 "register_operand") ++ (match_operand:V4SI 3 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx t1 = gen_reg_rtx (V16QImode); ++ rtx t2 = gen_reg_rtx (V8HImode); ++ rtx t3 = gen_reg_rtx (V4SImode); ++ emit_insn (gen_lsx_vabsd_u_bu (t1, operands[1], operands[2])); ++ emit_insn (gen_lsx_vhaddw_hu_bu (t2, t1, t1)); ++ emit_insn (gen_lsx_vhaddw_wu_hu (t3, t2, t2)); ++ emit_insn (gen_addv4si3 (operands[0], t3, operands[3])); ++ DONE; ++}) ++ ++(define_expand "ssadv16qi" ++ [(match_operand:V4SI 0 "register_operand") ++ (match_operand:V16QI 1 "register_operand") ++ (match_operand:V16QI 2 "register_operand") ++ (match_operand:V4SI 3 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx t1 = gen_reg_rtx (V16QImode); ++ rtx t2 = gen_reg_rtx (V8HImode); ++ rtx t3 = gen_reg_rtx (V4SImode); ++ emit_insn (gen_lsx_vabsd_s_b (t1, operands[1], operands[2])); ++ emit_insn (gen_lsx_vhaddw_hu_bu (t2, t1, t1)); ++ emit_insn (gen_lsx_vhaddw_wu_hu (t3, t2, t2)); ++ emit_insn (gen_addv4si3 (operands[0], t3, operands[3])); ++ DONE; ++}) ++ + ;; 
Delete one of two instructions that exactly play the same role. + (define_peephole2 + [(set (match_operand:V2DI 0 "register_operand") +-- +2.43.5 + diff --git a/LoongArch-Optimizations-of-vector-construction.patch b/LoongArch-Optimizations-of-vector-construction.patch new file mode 100644 index 0000000..72a3b9d --- /dev/null +++ b/LoongArch-Optimizations-of-vector-construction.patch @@ -0,0 +1,1199 @@ +From 95379c6fb4842f8a18fa2ed61fd9eae396117b71 Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 07:13:59 +0000 +Subject: [PATCH 16/30] LoongArch: Optimizations of vector construction. + +Signed-off-by: Peng Fan +--- + gcc/config/loongarch/lasx.md | 69 ++ + gcc/config/loongarch/loongarch.c | 637 ++++++++++-------- + gcc/config/loongarch/lsx.md | 134 ++++ + .../loongarch/lasx-vec-construct-opt.c | 102 +++ + .../loongarch/lsx-vec-construct-opt.c | 85 +++ + 5 files changed, 744 insertions(+), 283 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/lasx-vec-construct-opt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/lsx-vec-construct-opt.c + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index edd6e3204..ad2844f2d 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -189,6 +189,9 @@ + UNSPEC_LASX_XVLDI + UNSPEC_LASX_XVLDX + UNSPEC_LASX_XVSTX ++ UNSPEC_LASX_VECINIT_MERGE ++ UNSPEC_LASX_VEC_SET_INTERNAL ++ UNSPEC_LASX_XVILVL_INTERNAL + ]) + + ;; All vector modes with 256 bits. +@@ -258,6 +261,15 @@ + [(V8SF "V4SF") + (V4DF "V2DF")]) + ++;; The attribute gives half int/float modes for vector modes. ++(define_mode_attr VHMODE256_ALL ++ [(V32QI "V16QI") ++ (V16HI "V8HI") ++ (V8SI "V4SI") ++ (V4DI "V2DI") ++ (V8SF "V4SF") ++ (V4DF "V2DF")]) ++ + ;; The attribute gives double modes for vector modes in LASX. + (define_mode_attr VDMODE256 + [(V8SI "V4DI") +@@ -308,6 +320,11 @@ + (V4DI "v4df") + (V8SI "v8sf")]) + ++;; This attribute gives V32QI mode and V16HI mode with half size. ++(define_mode_attr mode256_i_half ++ [(V32QI "v16qi") ++ (V16HI "v8hi")]) ++ + ;; This attribute gives suffix for LASX instructions.HOW? + (define_mode_attr lasxfmt + [(V4DF "d") +@@ -753,6 +770,20 @@ + [(set_attr "type" "simd_splat") + (set_attr "mode" "")]) + ++;; Only for loongarch_expand_vector_init in loongarch.cc. ++;; Support a LSX-mode input op2. ++(define_insn "lasx_vecinit_merge_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX ++ [(match_operand:LASX 1 "register_operand" "0") ++ (match_operand: 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LASX_VECINIT_MERGE))] ++ "ISA_HAS_LASX" ++ "xvpermi.q\t%u0,%u2,%3" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ + (define_insn "lasx_xvpickve2gr_d" + [(set (match_operand:DI 0 "register_operand" "=r") + (any_extend:DI +@@ -776,6 +807,33 @@ + DONE; + }) + ++;; Only for loongarch_expand_vector_init in loongarch.cc. ++;; Simulate missing instructions xvinsgr2vr.b and xvinsgr2vr.h. 
++(define_expand "vec_set_internal" ++ [(match_operand:ILASX_HB 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lasx_xvinsgr2vr__internal ++ (operands[0], operands[1], operands[0], index)); ++ DONE; ++}) ++ ++(define_insn "lasx_xvinsgr2vr__internal" ++ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") ++ (unspec:ILASX_HB [(match_operand: 1 "reg_or_0_operand" "rJ") ++ (match_operand:ILASX_HB 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")] ++ UNSPEC_LASX_VEC_SET_INTERNAL))] ++ "ISA_HAS_LASX" ++{ ++ return "vinsgr2vr.\t%w0,%z1,%y3"; ++} ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ + (define_expand "vec_set" + [(match_operand:FLASX 0 "register_operand") + (match_operand: 1 "reg_or_0_operand") +@@ -1587,6 +1645,17 @@ + [(set_attr "type" "simd_flog2") + (set_attr "mode" "")]) + ++;; Only for loongarch_expand_vector_init in loongarch.cc. ++;; Merge two scalar floating-point op1 and op2 into a LASX op0. ++(define_insn "lasx_xvilvl__internal" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand: 1 "register_operand" "f") ++ (match_operand: 2 "register_operand" "f")] ++ UNSPEC_LASX_XVILVL_INTERNAL))] ++ "ISA_HAS_LASX" ++ "xvilvl.\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) + + (define_insn "smax3" + [(set (match_operand:FLASX 0 "register_operand" "=f") +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index 0a20850d6..bda95d634 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -9718,328 +9718,399 @@ loongarch_expand_vector_group_init (rtx target, rtx vals) + ops[1]))); + } + ++/* Expand initialization of a vector which has all same elements. 
*/ ++ + void +-loongarch_expand_vector_init (rtx target, rtx vals) ++loongarch_expand_vector_init_same (rtx target, rtx vals, unsigned nvar) + { + machine_mode vmode = GET_MODE (target); + machine_mode imode = GET_MODE_INNER (vmode); +- unsigned i, nelt = GET_MODE_NUNITS (vmode); +- unsigned nvar = 0 /*, one_var = -1u*/ ; +- bool all_same = true; +- rtx x; ++ rtx same = XVECEXP (vals, 0, 0); ++ rtx temp, temp2; + +- for (i = 0; i < nelt; ++i) ++ if (CONST_INT_P (same) && nvar == 0 ++ && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) + { +- x = XVECEXP (vals, 0, i); +- if (!loongarch_constant_elt_p (x)) +- nvar++ /*, one_var = i */ ; +- if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0))) +- all_same = false; ++ switch (vmode) ++ { ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); ++ emit_move_insn (target, temp); ++ return; ++ default: ++ gcc_unreachable (); ++ } + } +- +- if (ISA_HAS_LASX && GET_MODE_SIZE (vmode) == 32) ++ temp = gen_reg_rtx (imode); ++ if (imode == GET_MODE (same)) ++ temp2 = same; ++ else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) + { +- if (all_same) ++ if (GET_CODE (same) == MEM) + { +- rtx same = XVECEXP (vals, 0, 0); +- rtx temp, temp2; +- +- if (CONST_INT_P (same) && nvar == 0 +- && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) +- { +- switch (vmode) +- { +- case E_V32QImode: +- case E_V16HImode: +- case E_V8SImode: +- case E_V4DImode: +- temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); +- emit_move_insn (target, temp); +- return; +- +- default: +- gcc_unreachable (); +- } +- } +- +- temp = gen_reg_rtx (imode); +- if (imode == GET_MODE (same)) +- temp2 = same; +- else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) +- { +- if (GET_CODE (same) == MEM) +- { +- rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); +- loongarch_emit_move (reg_tmp, same); +- temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0); +- } +- else +- temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0); +- } +- else +- { +- if (GET_CODE (same) == MEM) +- { +- rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); +- loongarch_emit_move (reg_tmp, same); +- temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); +- } +- else +- temp2 = lowpart_subreg (imode, same, GET_MODE (same)); +- } +- emit_move_insn (temp, temp2); +- +- switch (vmode) +- { +- case E_V32QImode: +- case E_V16HImode: +- case E_V8SImode: +- case E_V4DImode: +- loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); +- break; +- +- case E_V8SFmode: +- emit_insn (gen_lasx_xvreplve0_w_f_scalar (target, temp)); +- break; +- +- case E_V4DFmode: +- emit_insn (gen_lasx_xvreplve0_d_f_scalar (target, temp)); +- break; +- +- default: +- gcc_unreachable (); +- } ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0); + } + else ++ temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0); ++ } ++ else ++ { ++ if (GET_CODE (same) == MEM) + { +- rtvec vec = shallow_copy_rtvec (XVEC (vals, 0)); ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); ++ } ++ else ++ temp2 = lowpart_subreg (imode, same, GET_MODE (same)); ++ } ++ emit_move_insn (temp, temp2); + +- for (i = 0; i < nelt; ++i) +- RTVEC_ELT (vec, i) = CONST0_RTX (imode); ++ 
switch (vmode) ++ { ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); ++ break; + +- emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec)); ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvreplve0_w_f_scalar (target, temp)); ++ break; + +- machine_mode half_mode = VOIDmode; +- rtx target_hi, target_lo; ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvreplve0_d_f_scalar (target, temp)); ++ break; + +- switch (vmode) +- { +- case E_V32QImode: +- half_mode=E_V16QImode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- emit_insn (gen_vec_setv16qi (target_hi, temp_hi, GEN_INT (i))); +- emit_insn (gen_vec_setv16qi (target_lo, temp_lo, GEN_INT (i))); +- } +- emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); +- break; ++ case E_V4SFmode: ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); ++ break; + +- case E_V16HImode: +- half_mode=E_V8HImode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- emit_insn (gen_vec_setv8hi (target_hi, temp_hi, GEN_INT (i))); +- emit_insn (gen_vec_setv8hi (target_lo, temp_lo, GEN_INT (i))); +- } +- emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); +- break; ++ case E_V2DFmode: ++ emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); ++ break; + +- case E_V8SImode: +- half_mode=V4SImode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- emit_insn (gen_vec_setv4si (target_hi, temp_hi, GEN_INT (i))); +- emit_insn (gen_vec_setv4si (target_lo, temp_lo, GEN_INT (i))); +- } +- emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); +- break; ++ default: ++ gcc_unreachable (); ++ } ++} + +- case E_V4DImode: +- half_mode=E_V2DImode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- emit_insn (gen_vec_setv2di (target_hi, temp_hi, GEN_INT (i))); +- emit_insn (gen_vec_setv2di (target_lo, temp_lo, GEN_INT (i))); +- } +- /* PUT_MODE(target_hi, GET_MODE (target)); */ +- /* PUT_MODE(target_lo, GET_MODE (target)); */ +- /* emit_insn ( gen_lasx_shufi_q_v4di (target_hi, target_lo, GEN_INT(1))); */ +- /* emit_move_insn (target, target_hi); */ +- emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); +- break; ++/* Expand a vector initialization. 
*/ + +- case E_V8SFmode: +- half_mode=E_V4SFmode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- emit_insn (gen_vec_setv4sf (target_hi, temp_hi, GEN_INT (i))); +- emit_insn (gen_vec_setv4sf (target_lo, temp_lo, GEN_INT (i))); +- } +- emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); +- break; ++void ++loongarch_expand_vector_init (rtx target, rtx vals) ++{ ++ machine_mode vmode = GET_MODE (target); ++ machine_mode imode = GET_MODE_INNER (vmode); ++ unsigned i, nelt = GET_MODE_NUNITS (vmode); ++ /* VALS is divided into high and low half-part. */ ++ /* Number of non constant elements in corresponding parts of VALS. */ ++ unsigned nvar = 0, hi_nvar = 0, lo_nvar = 0; ++ /* all_same : true if all elements of VALS are the same. ++ hi_same : true if all elements of the high half-part are the same. ++ lo_same : true if all elements of the low half-part are the same. ++ half_same : true if the high half-part is the same as the low one. */ ++ bool all_same = false, hi_same = true, lo_same = true, half_same = true; ++ rtx val[32], val_hi[32], val_lo[16]; ++ rtx x, op0, op1; ++ /* Copy one element of vals to per element of target vector. */ ++ typedef rtx (*loongarch_vec_repl1_fn) (rtx, rtx); ++ /* Copy two elements of vals to target vector. */ ++ typedef rtx (*loongarch_vec_repl2_fn) (rtx, rtx, rtx); ++ /* Insert scalar operands into the specified position of the vector. */ ++ typedef rtx (*loongarch_vec_set_fn) (rtx, rtx, rtx); ++ /* Copy 64bit lowpart to highpart. */ ++ typedef rtx (*loongarch_vec_mirror_fn) (rtx, rtx, rtx); ++ /* Merge lowpart and highpart into target. */ ++ typedef rtx (*loongarch_vec_merge_fn) (rtx, rtx, rtx, rtx); ++ ++ loongarch_vec_repl1_fn loongarch_vec_repl1_128 = NULL, ++ loongarch_vec_repl1_256 = NULL; ++ loongarch_vec_repl2_fn loongarch_vec_repl2_128 = NULL, ++ loongarch_vec_repl2_256 = NULL; ++ loongarch_vec_set_fn loongarch_vec_set128 = NULL, loongarch_vec_set256 = NULL; ++ loongarch_vec_mirror_fn loongarch_vec_mirror = NULL; ++ loongarch_vec_merge_fn loongarch_lasx_vecinit_merge = NULL; ++ machine_mode half_mode = VOIDmode; ++ ++ /* Check whether elements of each part are the same. */ ++ for (i = 0; i < nelt / 2; ++i) ++ { ++ val_hi[i] = val_hi[i + nelt / 2] = val[i + nelt / 2] ++ = XVECEXP (vals, 0, i + nelt / 2); ++ val_lo[i] = val[i] = XVECEXP (vals, 0, i); ++ if (!loongarch_constant_elt_p (val_hi[i])) ++ hi_nvar++; ++ if (!loongarch_constant_elt_p (val_lo[i])) ++ lo_nvar++; ++ if (i > 0 && !rtx_equal_p (val_hi[i], val_hi[0])) ++ hi_same = false; ++ if (i > 0 && !rtx_equal_p (val_lo[i], val_lo[0])) ++ lo_same = false; ++ if (!rtx_equal_p (val_hi[i], val_lo[i])) ++ half_same = false; ++ } ++ ++ /* If all elements are the same, set all_same true. 
*/ ++ if (hi_same && lo_same && half_same) ++ all_same = true; ++ ++ nvar = hi_nvar + lo_nvar; + +- case E_V4DFmode: +- half_mode=E_V2DFmode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- emit_insn (gen_vec_setv2df (target_hi, temp_hi, GEN_INT (i))); +- emit_insn (gen_vec_setv2df (target_lo, temp_lo, GEN_INT (i))); +- } +- emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (vmode, target_hi, target_lo))); +- break; ++ switch (vmode) ++ { ++ case E_V32QImode: ++ half_mode = E_V16QImode; ++ loongarch_vec_set256 = gen_vec_setv32qi_internal; ++ loongarch_vec_repl1_256 = gen_lasx_xvreplgr2vr_b; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v32qi : gen_lasx_vecinit_merge_v32qi; ++ /* FALLTHRU. */ ++ case E_V16QImode: ++ loongarch_vec_set128 = gen_vec_setv16qi; ++ loongarch_vec_repl1_128 = gen_lsx_vreplgr2vr_b; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_b; ++ break; + +- default: +- gcc_unreachable(); +- } ++ case E_V16HImode: ++ half_mode = E_V8HImode; ++ loongarch_vec_set256 = gen_vec_setv16hi_internal; ++ loongarch_vec_repl1_256 = gen_lasx_xvreplgr2vr_h; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v16hi : gen_lasx_vecinit_merge_v16hi; ++ /* FALLTHRU. */ ++ case E_V8HImode: ++ loongarch_vec_set128 = gen_vec_setv8hi; ++ loongarch_vec_repl1_128 = gen_lsx_vreplgr2vr_h; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_h; ++ break; + +- } +- return; ++ case E_V8SImode: ++ half_mode = V4SImode; ++ loongarch_vec_set256 = gen_vec_setv8si; ++ loongarch_vec_repl1_256 = gen_lasx_xvreplgr2vr_w; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v8si : gen_lasx_vecinit_merge_v8si; ++ /* FALLTHRU. */ ++ case E_V4SImode: ++ loongarch_vec_set128 = gen_vec_setv4si; ++ loongarch_vec_repl1_128 = gen_lsx_vreplgr2vr_w; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_w; ++ break; ++ ++ case E_V4DImode: ++ half_mode = E_V2DImode; ++ loongarch_vec_set256 = gen_vec_setv4di; ++ loongarch_vec_repl1_256 = gen_lasx_xvreplgr2vr_d; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v4di : gen_lasx_vecinit_merge_v4di; ++ /* FALLTHRU. */ ++ case E_V2DImode: ++ loongarch_vec_set128 = gen_vec_setv2di; ++ loongarch_vec_repl1_128 = gen_lsx_vreplgr2vr_d; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_d; ++ break; ++ ++ case E_V8SFmode: ++ half_mode = E_V4SFmode; ++ loongarch_vec_set256 = gen_vec_setv8sf; ++ loongarch_vec_repl1_128 = gen_lsx_vreplvei_w_f_scalar; ++ loongarch_vec_repl2_256 = gen_lasx_xvilvl_w_f_internal; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v8sf : gen_lasx_vecinit_merge_v8sf; ++ /* FALLTHRU. */ ++ case E_V4SFmode: ++ loongarch_vec_set128 = gen_vec_setv4sf; ++ loongarch_vec_repl2_128 = gen_lsx_vilvl_w_f_internal; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_w_f; ++ break; ++ ++ case E_V4DFmode: ++ half_mode = E_V2DFmode; ++ loongarch_vec_set256 = gen_vec_setv4df; ++ loongarch_vec_repl1_128 = gen_lsx_vreplvei_d_f_scalar; ++ loongarch_vec_repl2_256 = gen_lasx_xvilvl_d_f_internal; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v4df : gen_lasx_vecinit_merge_v4df; ++ /* FALLTHRU. 
*/ ++ case E_V2DFmode: ++ loongarch_vec_set128 = gen_vec_setv2df; ++ loongarch_vec_repl2_128 = gen_lsx_vilvl_d_f_internal; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_d_f; ++ break; ++ ++ default: ++ gcc_unreachable (); + } + +- if (ISA_HAS_LSX) ++ if (ISA_HAS_LASX && GET_MODE_SIZE (vmode) == 32) + { ++ /* If all elements are the same, just do a broadcost. */ + if (all_same) ++ loongarch_expand_vector_init_same (target, vals, nvar); ++ else + { +- rtx same = XVECEXP (vals, 0, 0); +- rtx temp, temp2; ++ gcc_assert (nelt >= 4); ++ ++ rtx target_hi, target_lo; ++ /* Write elements of high half-part in target directly. */ ++ target_hi = target; ++ target_lo = gen_reg_rtx (half_mode); ++ ++ /* If all elements of high half-part are the same, ++ just do a broadcost. Also applicable to low half-part. */ ++ if (hi_same) ++ { ++ rtx vtmp = gen_rtx_PARALLEL (vmode, gen_rtvec_v (nelt, val_hi)); ++ loongarch_expand_vector_init_same (target_hi, vtmp, hi_nvar); ++ } ++ if (lo_same) ++ { ++ rtx vtmp ++ = gen_rtx_PARALLEL (half_mode, gen_rtvec_v (nelt / 2, val_lo)); ++ loongarch_expand_vector_init_same (target_lo, vtmp, lo_nvar); ++ } + +- if (CONST_INT_P (same) && nvar == 0 +- && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) ++ for (i = 0; i < nelt / 2; ++i) + { +- switch (vmode) ++ if (!hi_same) + { +- case E_V16QImode: +- case E_V8HImode: +- case E_V4SImode: +- case E_V2DImode: +- temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); +- emit_move_insn (target, temp); +- return; +- +- default: +- gcc_unreachable (); ++ if (vmode == E_V8SFmode || vmode == E_V4DFmode) ++ { ++ /* Using xvilvl to load lowest 2 elements simultaneously ++ to reduce the number of instructions. */ ++ if (i == 1) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_hi[0]); ++ op1 = gen_reg_rtx (imode); ++ emit_move_insn (op1, val_hi[1]); ++ emit_insn ( ++ loongarch_vec_repl2_256 (target_hi, op0, op1)); ++ } ++ else if (i > 1) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_hi[i]); ++ emit_insn ( ++ loongarch_vec_set256 (target_hi, op0, GEN_INT (i))); ++ } ++ } ++ else ++ { ++ /* Assign the lowest element of val_hi to all elements ++ of target_hi. */ ++ if (i == 0) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_hi[0]); ++ emit_insn (loongarch_vec_repl1_256 (target_hi, op0)); ++ } ++ else if (!rtx_equal_p (val_hi[i], val_hi[0])) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_hi[i]); ++ emit_insn ( ++ loongarch_vec_set256 (target_hi, op0, GEN_INT (i))); ++ } ++ } ++ } ++ if (!lo_same && !half_same) ++ { ++ /* Assign the lowest element of val_lo to all elements ++ of target_lo. 
*/ ++ if (i == 0) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_lo[0]); ++ emit_insn (loongarch_vec_repl1_128 (target_lo, op0)); ++ } ++ else if (!rtx_equal_p (val_lo[i], val_lo[0])) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_lo[i]); ++ emit_insn ( ++ loongarch_vec_set128 (target_lo, op0, GEN_INT (i))); ++ } + } + } +- temp = gen_reg_rtx (imode); +- if (imode == GET_MODE (same)) +- temp2 = same; +- else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) +- { +- if (GET_CODE (same) == MEM) +- { +- rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); +- loongarch_emit_move (reg_tmp, same); +- temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0); +- } +- else +- temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0); +- } +- else +- { +- if (GET_CODE (same) == MEM) +- { +- rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); +- loongarch_emit_move (reg_tmp, same); +- temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); +- } +- else +- temp2 = lowpart_subreg (imode, same, GET_MODE (same)); +- } +- emit_move_insn (temp, temp2); +- +- switch (vmode) ++ if (half_same) + { +- case E_V16QImode: +- case E_V8HImode: +- case E_V4SImode: +- case E_V2DImode: +- loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); +- break; +- +- case E_V4SFmode: +- emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); +- break; +- +- case E_V2DFmode: +- emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); +- break; +- +- default: +- gcc_unreachable (); ++ emit_insn (loongarch_lasx_vecinit_merge (target, target_hi, ++ target_hi, const0_rtx)); ++ return; + } ++ emit_insn (loongarch_lasx_vecinit_merge (target, target_hi, target_lo, ++ GEN_INT (0x20))); + } ++ return; ++ } ++ ++ if (ISA_HAS_LSX) ++ { ++ if (all_same) ++ loongarch_expand_vector_init_same (target, vals, nvar); + else + { +- emit_move_insn (target, CONST0_RTX (vmode)); +- + for (i = 0; i < nelt; ++i) + { +- rtx temp = gen_reg_rtx (imode); +- emit_move_insn (temp, XVECEXP (vals, 0, i)); +- switch (vmode) ++ if (vmode == E_V4SFmode || vmode == E_V2DFmode) + { +- case E_V16QImode: +- emit_insn (gen_vec_setv16qi (target, temp, GEN_INT (i))); +- break; +- +- case E_V8HImode: +- emit_insn (gen_vec_setv8hi (target, temp, GEN_INT (i))); +- break; +- +- case E_V4SImode: +- emit_insn (gen_vec_setv4si (target, temp, GEN_INT (i))); +- break; +- +- case E_V2DImode: +- emit_insn (gen_vec_setv2di (target, temp, GEN_INT (i))); +- break; +- +- case E_V4SFmode: +- emit_insn (gen_vec_setv4sf (target, temp, GEN_INT (i))); +- break; +- +- case E_V2DFmode: +- emit_insn (gen_vec_setv2df (target, temp, GEN_INT (i))); +- break; +- +- default: +- gcc_unreachable (); ++ /* Using vilvl to load lowest 2 elements simultaneously to ++ reduce the number of instructions. */ ++ if (i == 1) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val[0]); ++ op1 = gen_reg_rtx (imode); ++ emit_move_insn (op1, val[1]); ++ emit_insn (loongarch_vec_repl2_128 (target, op0, op1)); ++ } ++ else if (i > 1) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val[i]); ++ emit_insn ( ++ loongarch_vec_set128 (target, op0, GEN_INT (i))); ++ } ++ } ++ else ++ { ++ if (half_same && i == nelt / 2) ++ { ++ emit_insn ( ++ loongarch_vec_mirror (target, target, const0_rtx)); ++ return; ++ } ++ /* Assign the lowest element of val to all elements of ++ target. 
*/ ++ if (i == 0) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val[0]); ++ emit_insn (loongarch_vec_repl1_128 (target, op0)); ++ } ++ else if (!rtx_equal_p (val[i], val[0])) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val[i]); ++ emit_insn ( ++ loongarch_vec_set128 (target, op0, GEN_INT (i))); ++ } + } + } + } +@@ -10056,8 +10127,8 @@ loongarch_expand_vector_init (rtx target, rtx vals) + /* For two-part initialization, always use CONCAT. */ + if (nelt == 2) + { +- rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0)); +- rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1)); ++ rtx op0 = force_reg (imode, val[0]); ++ rtx op1 = force_reg (imode, val[1]); + x = gen_rtx_VEC_CONCAT (vmode, op0, op1); + emit_insn (gen_rtx_SET (target, x)); + return; +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index a6962b271..688c67cc2 100644 +--- a/gcc/config/loongarch/lsx.md ++++ b/gcc/config/loongarch/lsx.md +@@ -134,6 +134,8 @@ + UNSPEC_LSX_VSTX + UNSPEC_LSX_VEXTL_QU_DU + UNSPEC_LSX_VSETEQZ_V ++ UNSPEC_LSX_VILVL_INTERNAL ++ UNSPEC_LSX_VREPLVEI_MIRROR + ]) + + ;; This attribute gives suffix for integers in VHMODE. +@@ -1477,6 +1479,18 @@ + [(set_attr "type" "simd_flog2") + (set_attr "mode" "")]) + ++;; Only for loongarch_expand_vector_init in loongarch.cc. ++;; Merge two scalar floating-point op1 and op2 into a LSX op0. ++(define_insn "lsx_vilvl__internal" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand: 1 "register_operand" "f") ++ (match_operand: 2 "register_operand" "f")] ++ UNSPEC_LSX_VILVL_INTERNAL))] ++ "ISA_HAS_LSX" ++ "vilvl.\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ + (define_insn "smax3" + [(set (match_operand:FLSX 0 "register_operand" "=f") + (smax:FLSX (match_operand:FLSX 1 "register_operand" "f") +@@ -2283,6 +2297,16 @@ + [(set_attr "type" "simd_splat") + (set_attr "mode" "")]) + ++(define_insn "lsx_vreplvei_mirror_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (unspec: LSX [(match_operand:LSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VREPLVEI_MIRROR))] ++ "ISA_HAS_LSX" ++ "vreplvei.d\t%w0,%w1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ + (define_insn "lsx_vreplvei_" + [(set (match_operand:LSX 0 "register_operand" "=f") + (vec_duplicate:LSX +@@ -2444,6 +2468,99 @@ + DONE; + }) + ++;; Implement vec_concatv2df by vilvl.d. ++(define_insn_and_split "vec_concatv2df" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (vec_concat:V2DF ++ (match_operand:DF 1 "register_operand" "f") ++ (match_operand:DF 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "" ++ "&& reload_completed" ++ [(const_int 0)] ++{ ++ emit_insn (gen_lsx_vilvl_d_f (operands[0], ++ gen_rtx_REG (V2DFmode, REGNO (operands[1])), ++ gen_rtx_REG (V2DFmode, REGNO (operands[2])))); ++ DONE; ++} ++ [(set_attr "mode" "V2DF")]) ++ ++;; Implement vec_concatv4sf. ++;; Optimize based on hardware register allocation of operands. 
++(define_insn_and_split "vec_concatv4sf" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_concat:V4SF ++ (vec_concat:V2SF ++ (match_operand:SF 1 "register_operand" "f") ++ (match_operand:SF 2 "register_operand" "f")) ++ (vec_concat:V2SF ++ (match_operand:SF 3 "register_operand" "f") ++ (match_operand:SF 4 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "" ++ "&& reload_completed" ++ [(const_int 0)] ++{ ++ operands[5] = GEN_INT (1); ++ operands[6] = GEN_INT (2); ++ operands[7] = GEN_INT (4); ++ operands[8] = GEN_INT (8); ++ ++ /* If all input are same, use vreplvei.w to broadcast. */ ++ if (REGNO (operands[1]) == REGNO (operands[2]) ++ && REGNO (operands[1]) == REGNO (operands[3]) ++ && REGNO (operands[1]) == REGNO (operands[4])) ++ { ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (operands[0], operands[1])); ++ } ++ /* If op0 is equal to op3, use vreplvei.w to set each element of op0 as op3. ++ If other input is different from op3, use vextrins.w to insert. */ ++ else if (REGNO (operands[0]) == REGNO (operands[3])) ++ { ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (operands[0], operands[3])); ++ if (REGNO (operands[1]) != REGNO (operands[3])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[1], ++ operands[0], operands[5])); ++ if (REGNO (operands[2]) != REGNO (operands[3])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[2], ++ operands[0], operands[6])); ++ if (REGNO (operands[4]) != REGNO (operands[3])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[4], ++ operands[0], operands[8])); ++ } ++ /* If op0 is equal to op4, use vreplvei.w to set each element of op0 as op4. ++ If other input is different from op4, use vextrins.w to insert. */ ++ else if (REGNO (operands[0]) == REGNO (operands[4])) ++ { ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (operands[0], operands[4])); ++ if (REGNO (operands[1]) != REGNO (operands[4])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[1], ++ operands[0], operands[5])); ++ if (REGNO (operands[2]) != REGNO (operands[4])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[2], ++ operands[0], operands[6])); ++ if (REGNO (operands[3]) != REGNO (operands[4])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[3], ++ operands[0], operands[7])); ++ } ++ /* Otherwise, use vilvl.w to merge op1 and op2 first. ++ If op3 is different from op1, use vextrins.w to insert. ++ If op4 is different from op2, use vextrins.w to insert. */ ++ else ++ { ++ emit_insn ( ++ gen_lsx_vilvl_w_f (operands[0], ++ gen_rtx_REG (V4SFmode, REGNO (operands[1])), ++ gen_rtx_REG (V4SFmode, REGNO (operands[2])))); ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[3], ++ operands[0], operands[7])); ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[4], ++ operands[0], operands[8])); ++ } ++ DONE; ++} ++ [(set_attr "mode" "V4SF")]) + + (define_insn "vandn3" + [(set (match_operand:LSX 0 "register_operand" "=f") +@@ -3455,3 +3572,20 @@ + const0_rtx)); + DONE; + }) ++ ++;; Delete one of two instructions that exactly play the same role. 
++(define_peephole2 ++ [(set (match_operand:V2DI 0 "register_operand") ++ (vec_duplicate:V2DI (match_operand:DI 1 "register_operand"))) ++ (set (match_operand:V2DI 2 "register_operand") ++ (vec_merge:V2DI ++ (vec_duplicate:V2DI (match_operand:DI 3 "register_operand")) ++ (match_operand:V2DI 4 "register_operand") ++ (match_operand 5 "const_int_operand")))] ++ "operands[0] == operands[2] && ++ operands[1] == operands[3] && ++ operands[2] == operands[4] && ++ INTVAL (operands[5]) == 2" ++ [(set (match_dup 0) ++ (vec_duplicate:V2DI (match_dup 1)))] ++ "") +diff --git a/gcc/testsuite/gcc.target/loongarch/lasx-vec-construct-opt.c b/gcc/testsuite/gcc.target/loongarch/lasx-vec-construct-opt.c +new file mode 100644 +index 000000000..487816a48 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/lasx-vec-construct-opt.c +@@ -0,0 +1,102 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mlasx -O3" } */ ++ ++#include ++ ++extern long long *x_di; ++extern int *x_si; ++extern short int *x_hi; ++extern char *x_qi; ++extern double *y_df; ++extern float *y_sf; ++ ++/* Remove some unnecessary vinsgr2vr.d as the corresponding elements ++ have already been set. */ ++/* { dg-final { scan-assembler-not "v4i64:.*\tvinsgr2vr\\.d.*v4i64" } } */ ++/* { dg-final { scan-assembler-times "v4i64:.*\txvldrepl\\.d.*v4i64" 1 } } */ ++v4i64 ++vec_construct_v4i64 () ++{ ++ v4i64 res = ++ { x_di[0], x_di[0], x_di[1], x_di[1] } ++ ; ++ return res; ++} ++ ++/* Remove some unnecessary vinsgr2vr.w as the corresponding elements ++ have already been set. */ ++/* { dg-final { scan-assembler-not "v8i32:.*\tvinsgr2vr\\.w.*v8i32" } } */ ++/* { dg-final { scan-assembler-times "v8i32:.*\txvreplgr2vr\\.w.*v8i32" 1 } } */ ++v8i32 ++vec_construct_v8i32 () ++{ ++ v8i32 res = ++ { x_si[0], x_si[0], x_si[0], x_si[0], ++ x_si[0], x_si[2], x_si[0], x_si[0] } ++ ; ++ return res; ++} ++ ++/* Remove some unnecessary vinsgr2vr.h as the corresponding elements ++ have already been set. */ ++/* { dg-final { scan-assembler-not "v16i16:.*\tvori\\.b.*v16i16" } } */ ++/* { dg-final { scan-assembler-times "v16i16:.*\txvreplgr2vr\\.h.*v16i1" 1 } } */ ++v16i16 ++vec_construct_v16i16 () ++{ ++ v16i16 res = ++ { x_hi[1], x_hi[2], x_hi[1], x_hi[1], ++ x_hi[1], x_hi[1], x_hi[1], x_hi[1], ++ x_hi[1], x_hi[1], x_hi[1], x_hi[1], ++ x_hi[1], x_hi[1], x_hi[1], x_hi[2] } ++ ; ++ return res; ++} ++ ++/* Remove some unnecessary vinsgr2vr.b as the corresponding elements ++ have already been set. */ ++/* { dg-final { scan-assembler-not "v32i8:.*\tvori\\.b.*v32i8" } } */ ++/* { dg-final { scan-assembler-times "v32i8:.*\txvreplgr2vr\\.b.*v32i8" 1 } } */ ++v32i8 ++vec_construct_v32i8 () ++{ ++ v32i8 res = ++ { x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[2], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[3] } ++ ; ++ return res; ++} ++ ++/* Set 2 elements of a vector simultaneously by vilvl.d ++ and reducing more vextrins.d. */ ++/* { dg-final { scan-assembler-not "v4f64:.*\tvori\\.b.*v4f64" } } */ ++/* { dg-final { scan-assembler-not "v4f64:.*\tvextrins\\.d.*v4f64" } } */ ++/* { dg-final { scan-assembler-times "v4f64:.*\tvilvl\\.d.*v4f64" 1 } } */ ++v4f64 ++vec_construct_v4f64 () ++{ ++ v4f64 res = ++ { y_df[0], y_df[2], y_df[0], y_df[0]} ++ ; ++ return res; ++} ++ ++/* Set 2 elements of a vector simultaneously by vilvl.w ++ and reducing more vextrins.w. 
*/ ++/* { dg-final { scan-assembler-not "v8f32:.*\tvextrins\\.w.*v8f32" } } */ ++/* { dg-final { scan-assembler-times "v8f32:.*\txvilvl\\.w.*v8f32" 1 } } */ ++v8f32 ++vec_construct_v8f32 () ++{ ++ v8f32 res = ++ { y_sf[2], y_sf[1], y_sf[2], y_sf[3], ++ y_sf[2], y_sf[1], y_sf[2], y_sf[3] } ++ ; ++ return res; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/lsx-vec-construct-opt.c b/gcc/testsuite/gcc.target/loongarch/lsx-vec-construct-opt.c +new file mode 100644 +index 000000000..92da1c8af +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/lsx-vec-construct-opt.c +@@ -0,0 +1,85 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mlsx -O3" } */ ++ ++#include ++ ++extern long long *x_di; ++extern int *x_si; ++extern short int *x_hi; ++extern char *x_qi; ++extern double *y_df; ++extern float *y_sf; ++ ++/* No change for V2DI mode. */ ++v2i64 ++vec_construct_v2i64 () ++{ ++ v2i64 res = ++ { x_di[1], x_di[0]} ++ ; ++ return res; ++} ++ ++/* Only load the lowest 2 elements and directly copy them to high half-part, ++ reducing more vinsgr2vr.w. */ ++/* { dg-final { scan-assembler-times "v4i32:.*\tvreplvei\\.d.*v4i32" 1 } } */ ++v4i32 ++vec_construct_v4i32 () ++{ ++ v4i32 res = ++ { x_si[0], x_si[1], x_si[0], x_si[1]} ++ ; ++ return res; ++} ++ ++/* Only load the lowest 4 elements and directly copy them to high half-part, ++ reducing more vinsgr2vr.h. */ ++/* { dg-final { scan-assembler-times "v8i16:.*\tvreplvei\\.d.*v8i16" 1 } } */ ++v8i16 ++vec_construct_v8i16 () ++{ ++ v8i16 res = ++ { x_hi[0], x_hi[0], x_hi[0], x_hi[1], ++ x_hi[0], x_hi[0], x_hi[0], x_hi[1] } ++ ; ++ return res; ++} ++ ++/* Only load the lowest 8 elements and directly copy them to high half-part, ++ reducing more vinsgr2vr.b. */ ++/* { dg-final { scan-assembler-times "v16i8:.*\tvreplvei\\.d.*v16i8" 1 } } */ ++v16i8 ++vec_construct_v16i8 () ++{ ++ v16i8 res = ++ { x_qi[0], x_qi[1], x_qi[0], x_qi[2], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[3], ++ x_qi[0], x_qi[1], x_qi[0], x_qi[2], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[3] } ++ ; ++ return res; ++} ++ ++/* Set 2 elements of a vector simultaneously by vilvl.d. */ ++/* { dg-final { scan-assembler-not "v2f64:.*\tvextrins\\.d.*v2f64" } } */ ++/* { dg-final { scan-assembler-times "v2f64:.*\tvilvl\\.d.*v2f64" 1 } } */ ++v2f64 ++vec_construct_v2f64 () ++{ ++ v2f64 res = ++ { y_df[0], y_df[2] } ++ ; ++ return res; ++} ++ ++/* Set 2 elements of a vector simultaneously by vilvl.w ++ and reducing more vextrins.w. */ ++/* { dg-final { scan-assembler-times "v4f32:.*\tvilvl\\.w.*v4f32" 1 } } */ ++v4f32 ++vec_construct_v4f32 () ++{ ++ v4f32 res = ++ { y_sf[0], y_sf[1], y_sf[0], y_sf[0] } ++ ; ++ return res; ++} +-- +2.43.5 + diff --git a/LoongArch-Remove-bash-syntax-from-config.gcc.patch b/LoongArch-Remove-bash-syntax-from-config.gcc.patch new file mode 100644 index 0000000..26f087d --- /dev/null +++ b/LoongArch-Remove-bash-syntax-from-config.gcc.patch @@ -0,0 +1,140 @@ +From 2d22c2b4a8f0df49cb654bc36d3f934987731504 Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: Mon, 22 Jul 2024 09:25:38 +0800 +Subject: [PATCH 30/30] LoongArch: Remove bash syntax ';&' from config.gcc. 
+ +Change-Id: I87d16edd42bccd615b811214eb5860a0263f003b +--- + gcc/config.gcc | 44 ++++++++++++++++++++++++-------------------- + 1 file changed, 24 insertions(+), 20 deletions(-) + +diff --git a/gcc/config.gcc b/gcc/config.gcc +index cca2e6e43..3f4f79e79 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -4684,22 +4684,24 @@ case "${target}" in + # multilib library builds, unless otherwise specified + # in --with-multilib-list. + with_multilib_default="/march=abi-default" +- parse_state=opts + ;; + arch,fixed) + # Fixed: use the default gcc configuration for all multilib + # builds by default. + with_multilib_default="" +- parse_state=opts + ;; + arch,*) + with_multilib_default="/march=abi-default" +- parse_state=opts +- ;& ++ with_multilib_default="${with_multilib_default}/${component}" ++ ;; + opts,*) + with_multilib_default="${with_multilib_default}/${component}" + ;; + esac ++ ++ if test x${parse_state} = xarch; then ++ parse_state=opts; ++ fi + done + unset parse_state component + fi +@@ -4737,8 +4739,7 @@ case "${target}" in + parse_state="abi-base" + + for component in $(echo "${elem}" | tr '/' ' '); do +- case ${parse_state} in +- abi-base) ++ if test x${parse_state} = x"abi-base"; then + # Base ABI type + case ${component} in + lp64 | lp64d) elem_tmp="ABI_BASE_LP64D,";; +@@ -4754,9 +4755,10 @@ case "${target}" in + elem_abi_base="${component}" + + parse_state="abi-ext" +- ;; ++ continue ++ fi + +- abi-ext) ++ if test x${parse_state} = x"abi-ext"; then + # ABI extension type + case ${component} in + base) +@@ -4764,7 +4766,7 @@ case "${target}" in + loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," + loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. + parse_state="arch" +- continue; ++ continue + ;; + esac + +@@ -4773,15 +4775,17 @@ case "${target}" in + loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," + loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. + parse_state="arch" +- ;& ++ fi + +- arch) ++ if test x${parse_state} = x"arch"; then + # -march option + case ${component} in + abi-default | loongarch64 | la[2346]64) # OK, append here. + # Append -march spec for each multilib variant. + loongarch_multilib_list_make="${loongarch_multilib_list_make}/march=${component}" +- ;& ++ parse_state="opts" ++ continue ++ ;; + + default) + # "/default" is equivalent to --with-multilib-default=fixed +@@ -4793,9 +4797,9 @@ case "${target}" in + # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. + loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" + parse_state="opts" +- ;& ++ fi + +- opts) ++ if test x${parse_state} = x"opts"; then + # Other compiler options for building libraries. + # (no static sanity check performed) + case ${component} in +@@ -4806,22 +4810,22 @@ case "${target}" in + loongarch_multilib_list_make="${loongarch_multilib_list_make}/${component}" + ;; + esac +- ;; +- +- esac ++ fi + done + + case ${parse_state} in + "abi-ext") + elem_abi_ext="base" + loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE," +- loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now. +- ;& ++ # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. ++ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" ++ ;; + "arch") + # If ARCH is unspecified for this multilib variant, use ${with_multllib_default}. 
+ loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}" +- ;& ++ ;; + "opts") ++ : + ;; + esac + +-- +2.43.5 + diff --git a/LoongArch-Use-LSX-for-scalar-FP-rounding-with-explic.patch b/LoongArch-Use-LSX-for-scalar-FP-rounding-with-explic.patch new file mode 100644 index 0000000..3a8723e --- /dev/null +++ b/LoongArch-Use-LSX-for-scalar-FP-rounding-with-explic.patch @@ -0,0 +1,106 @@ +From 3f37ed43aa0ea3082a75be60e203eaee4ebdc1a6 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Sat, 30 Dec 2023 09:34:26 +0800 +Subject: [PATCH 24/30] LoongArch: Use LSX for scalar FP rounding with explicit + rounding mode + +In LoongArch FP base ISA there is only the frint.{s/d} instruction which +reads the global rounding mode. Utilize LSX for explicit rounding mode +even if the operand is scalar. It seems wasting the CPU power, but +still much faster than calling the library function. + +co-authored-By: Xi Ruoyao + +Change-Id: I7d0af4da52249637a2d2f87e6029a4bb31bbd0bb +--- + gcc/config/loongarch/loongarch.c | 5 +++ + gcc/config/loongarch/loongarch.md | 57 +++++++++++++++++++++++++++++++ + 2 files changed, 62 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index 3cae3cc9b..6b4db1c45 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -6294,6 +6294,11 @@ loongarch_can_change_mode_class (machine_mode from, machine_mode to, + if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to)) + return true; + ++ /* Allow conversion between LSX vector mode and scalar fp mode. */ ++ if ((LSX_SUPPORTED_MODE_P (from) && SCALAR_FLOAT_MODE_P (to)) ++ || ((SCALAR_FLOAT_MODE_P (from) && LSX_SUPPORTED_MODE_P (to)))) ++ return true; ++ + return !reg_classes_intersect_p (FP_REGS, rclass); + } + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index d6203ed1a..e85200af8 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -3699,6 +3699,63 @@ + [(set_attr "type" "unknown") + (set_attr "mode" "")]) + ++(define_mode_attr lsxmode [(SF "v4sf") (DF "v2df")]) ++ ++(define_expand "ceil2" ++ [(match_operand:ANYF 0 "register_operand") ++ (match_operand:ANYF 1 "register_operand")] ++ "ISA_HAS_LSX && (flag_fp_int_builtin_inexact || !flag_trapping_math)" ++{ ++ machine_mode lsx_mode ++ = mode == SFmode ? V4SFmode : V2DFmode; ++ rtx tmp = gen_reg_rtx (lsx_mode); ++ emit_insn (gen_rtx_SET (tmp, gen_rtx_VEC_DUPLICATE (lsx_mode, operands[1]))); ++ emit_insn (gen_ceil2 (tmp, tmp)); ++ emit_move_insn (operands[0], ++ lowpart_subreg (mode, tmp, lsx_mode)); ++ DONE; ++}) ++ ++(define_expand "floor2" ++ [(match_operand:ANYF 0 "register_operand") ++ (match_operand:ANYF 1 "register_operand")] ++ "ISA_HAS_LSX && (flag_fp_int_builtin_inexact || !flag_trapping_math)" ++{ ++ machine_mode lsx_mode ++ = mode == SFmode ? 
V4SFmode : V2DFmode; ++ rtx tmp = gen_reg_rtx (lsx_mode); ++ emit_insn (gen_rtx_SET (tmp, gen_rtx_VEC_DUPLICATE (lsx_mode, operands[1]))); ++ emit_insn (gen_floor2 (tmp, tmp)); ++ emit_move_insn (operands[0], ++ lowpart_subreg (mode, tmp, lsx_mode)); ++ DONE; ++}) ++ ++;; Round floating-point numbers to integers ++(define_insn "rint2" ++ [(set (match_operand:ANYF 0 "register_operand" "=f") ++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] ++ UNSPEC_FRINT))] ++ "" ++ "frint.\t%0,%1" ++ [(set_attr "type" "fcvt") ++ (set_attr "mode" "")]) ++ ++(define_expand "btrunc2" ++ [(match_operand:ANYF 0 "register_operand") ++ (match_operand:ANYF 1 "register_operand")] ++ "ISA_HAS_LSX && (flag_fp_int_builtin_inexact || !flag_trapping_math)" ++{ ++ machine_mode lsx_mode ++ = mode == SFmode ? V4SFmode : V2DFmode; ++ rtx tmp = gen_reg_rtx (lsx_mode); ++ emit_insn (gen_rtx_SET (tmp, gen_rtx_VEC_DUPLICATE (lsx_mode, operands[1]))); ++ emit_insn (gen_btrunc2 (tmp, tmp)); ++ emit_move_insn (operands[0], ++ lowpart_subreg (mode, tmp, lsx_mode)); ++ DONE; ++}) ++ + ;; Synchronization instructions. + + (include "sync.md") +-- +2.43.5 + diff --git a/LoongArch-Use-simplify_gen_subreg-instead-of-gen_rtx.patch b/LoongArch-Use-simplify_gen_subreg-instead-of-gen_rtx.patch new file mode 100644 index 0000000..d39f809 --- /dev/null +++ b/LoongArch-Use-simplify_gen_subreg-instead-of-gen_rtx.patch @@ -0,0 +1,180 @@ +From 6d4365a3a259af91b5cccb892e97fc4d799d792e Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 07:31:20 +0000 +Subject: [PATCH 21/30] LoongArch: Use simplify_gen_subreg instead of + gen_rtx_SUBREG directly. + +Signed-off-by: Peng Fan +--- + gcc/config/loongarch/loongarch.c | 68 ++++++++++++++++++-------------- + 1 file changed, 38 insertions(+), 30 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index bda95d634..cd0a7f4ee 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -7924,13 +7924,13 @@ loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d) + if (d->vmode == E_V2DFmode) + { + sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm)); +- tmp = gen_rtx_SUBREG (E_V2DImode, d->target, 0); ++ tmp = simplify_gen_subreg (E_V2DImode, d->target, d->vmode, 0); + emit_move_insn (tmp, sel); + } + else if (d->vmode == E_V4SFmode) + { + sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm)); +- tmp = gen_rtx_SUBREG (E_V4SImode, d->target, 0); ++ tmp = simplify_gen_subreg (E_V4SImode, d->target, d->vmode, 0); + emit_move_insn (tmp, sel); + } + else +@@ -8701,8 +8701,8 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + // Adjust op1 for selecting correct value in high 128bit of target + // register. + // op1: E_V4DImode, { 4, 5, 6, 7 } -> { 2, 3, 4, 5 } +- rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0); + emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, + conv_op0, GEN_INT (0x21))); + +@@ -8731,8 +8731,8 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + emit_move_insn (op0_alt, d->op0); + + // Generate subreg for fitting into insn gen function. 
+- rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0); + + // Adjust op value in temp register. + // op0 = {0,1,2,3}, op1 = {4,5,0,1} +@@ -8777,9 +8777,10 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + emit_move_insn (op1_alt, d->op1); + emit_move_insn (op0_alt, d->op0); + +- rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); +- rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0); ++ rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target, ++ d->vmode, 0); + + emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, + conv_op0, GEN_INT (0x02))); +@@ -8811,9 +8812,10 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + // Selector sample: E_V4DImode, { 0, 1, 4 ,5 } + if (!d->testing_p) + { +- rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); +- rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0); ++ rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target, ++ d->vmode, 0); + + // We can achieve the expectation by using sinple xvpermi.q insn. + emit_move_insn (conv_target, conv_op1); +@@ -8838,8 +8840,8 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + emit_move_insn (op1_alt, d->op1); + emit_move_insn (op0_alt, d->op0); + +- rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); ++ rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0); + // Adjust op value in temp regiter. 
+ // op0 = { 0, 1, 2, 3 }, op1 = { 6, 7, 2, 3 } + emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, +@@ -8881,9 +8883,10 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + emit_move_insn (op1_alt, d->op1); + emit_move_insn (op0_alt, d->op0); + +- rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); +- rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0); ++ rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target, ++ d->vmode, 0); + + emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1, + conv_op0, GEN_INT (0x13))); +@@ -8915,10 +8918,11 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + // Selector sample:E_V8SImode, { 2, 2, 2, 2, 2, 2, 2, 2 } + if (!d->testing_p) + { +- rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); ++ rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0); + rtx temp_reg = gen_reg_rtx (d->vmode); +- rtx conv_temp = gen_rtx_SUBREG (E_V4DImode, temp_reg, 0); ++ rtx conv_temp = simplify_gen_subreg (E_V4DImode, temp_reg, ++ d->vmode, 0); + + emit_move_insn (temp_reg, d->op0); + +@@ -9021,9 +9025,11 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + emit_move_insn (op0_alt, d->op0); + emit_move_insn (op1_alt, d->op1); + +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); +- rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); +- rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0); ++ rtx conv_op0a = simplify_gen_subreg (E_V4DImode, op0_alt, ++ d->vmode, 0); ++ rtx conv_op1a = simplify_gen_subreg (E_V4DImode, op1_alt, ++ d->vmode, 0); + + // Duplicate op0's low 128bit in op0, then duplicate high 128bit + // in op1. 
After this, xvshuf.* insn's selector argument can +@@ -9056,10 +9062,12 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + emit_move_insn (op0_alt, d->op0); + emit_move_insn (op1_alt, d->op1); + +- rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0); +- rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0); +- rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0); +- rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0); ++ rtx conv_op0a = simplify_gen_subreg (E_V4DImode, op0_alt, ++ d->vmode, 0); ++ rtx conv_op1a = simplify_gen_subreg (E_V4DImode, op1_alt, ++ d->vmode, 0); ++ rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0); ++ rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0); + + // Reorganize op0's hi/lo 128bit and op1's hi/lo 128bit, to make sure + //that selector's low 128bit can access all op0's elements, and +@@ -9177,12 +9185,12 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + { + case E_V4DFmode: + sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt, rperm)); +- tmp = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ tmp = simplify_gen_subreg (E_V4DImode, d->target, d->vmode, 0); + emit_move_insn (tmp, sel); + break; + case E_V8SFmode: + sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt, rperm)); +- tmp = gen_rtx_SUBREG (E_V8SImode, d->target, 0); ++ tmp = simplify_gen_subreg (E_V8SImode, d->target, d->vmode, 0); + emit_move_insn (tmp, sel); + break; + default: +@@ -9262,7 +9270,7 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d) + // 64bit in target vector register. + else if (extract_ev_od) + { +- rtx converted = gen_rtx_SUBREG (E_V4DImode, d->target, 0); ++ rtx converted = simplify_gen_subreg (E_V4DImode, d->target, d->vmode, 0); + emit_insn (gen_lasx_xvpermi_d_v4di (converted, converted, GEN_INT (0xD8))); + } + +-- +2.43.5 + diff --git a/LoongArch-add-gnat-ada-compiler-support.patch b/LoongArch-add-gnat-ada-compiler-support.patch new file mode 100644 index 0000000..84b7887 --- /dev/null +++ b/LoongArch-add-gnat-ada-compiler-support.patch @@ -0,0 +1,382 @@ +From 5082b2110e39c41b6db624150a30e60ec7ba32d8 Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 07:08:10 +0000 +Subject: [PATCH 10/30] LoongArch: add gnat (ada compiler) support + +Signed-off-by: Peng Fan +--- + gcc/ada/gcc-interface/Makefile.in | 48 +++++++ + gcc/ada/libgnarl/s-linux__loongarch.ads | 133 +++++++++++++++++++ + gcc/ada/libgnat/system-linux-loongarch.ads | 147 +++++++++++++++++++++ + gcc/config/loongarch/t-linux | 2 +- + 4 files changed, 329 insertions(+), 1 deletion(-) + create mode 100644 gcc/ada/libgnarl/s-linux__loongarch.ads + create mode 100644 gcc/ada/libgnat/system-linux-loongarch.ads + +diff --git a/gcc/ada/gcc-interface/Makefile.in b/gcc/ada/gcc-interface/Makefile.in +index 61d1e105b..728ca5666 100644 +--- a/gcc/ada/gcc-interface/Makefile.in ++++ b/gcc/ada/gcc-interface/Makefile.in +@@ -1502,6 +1502,54 @@ ifeq ($(strip $(filter-out cygwin% mingw32% pe,$(target_os))),) + LIBRARY_VERSION := $(LIB_VERSION) + endif + ++ ++# LoongArch Linux ++ifeq ($(strip $(filter-out loongarch% linux%,$(target_cpu) $(target_os))),) ++ LIBGNAT_TARGET_PAIRS = \ ++ a-exetim.adb. -- ++-- -- ++------------------------------------------------------------------------------ ++ ++-- This is the LoongArch version of this package ++ ++-- This package encapsulates cpu specific differences between implementations ++-- of GNU/Linux, in order to share s-osinte-linux.ads. 
++ ++-- PLEASE DO NOT add any with-clauses to this package or remove the pragma ++-- Preelaborate. This package is designed to be a bottom-level (leaf) package ++ ++with Interfaces.C; ++ ++package System.Linux is ++ pragma Preelaborate; ++ ++ ---------- ++ -- Time -- ++ ---------- ++ ++ subtype int is Interfaces.C.int; ++ subtype long is Interfaces.C.long; ++ subtype suseconds_t is Interfaces.C.long; ++ subtype time_t is Interfaces.C.long; ++ subtype clockid_t is Interfaces.C.int; ++ ++ type timespec is record ++ tv_sec : time_t; ++ tv_nsec : long; ++ end record; ++ pragma Convention (C, timespec); ++ ++ type timeval is record ++ tv_sec : time_t; ++ tv_usec : suseconds_t; ++ end record; ++ pragma Convention (C, timeval); ++ ++ ----------- ++ -- Errno -- ++ ----------- ++ ++ EAGAIN : constant := 11; ++ EINTR : constant := 4; ++ EINVAL : constant := 22; ++ ENOMEM : constant := 12; ++ EPERM : constant := 1; ++ ETIMEDOUT : constant := 110; ++ ++ ------------- ++ -- Signals -- ++ ------------- ++ ++ SIGHUP : constant := 1; -- hangup ++ SIGINT : constant := 2; -- interrupt (rubout) ++ SIGQUIT : constant := 3; -- quit (ASCD FS) ++ SIGILL : constant := 4; -- illegal instruction (not reset) ++ SIGTRAP : constant := 5; -- trace trap (not reset) ++ SIGIOT : constant := 6; -- IOT instruction ++ SIGABRT : constant := 6; -- used by abort, replace SIGIOT in the future ++ SIGBUS : constant := 7; -- bus error ++ SIGFPE : constant := 8; -- floating point exception ++ SIGKILL : constant := 9; -- kill (cannot be caught or ignored) ++ SIGUSR1 : constant := 10; -- user defined signal 1 ++ SIGSEGV : constant := 11; -- segmentation violation ++ SIGUSR2 : constant := 12; -- user defined signal 2 ++ SIGPIPE : constant := 13; -- write on a pipe with no one to read it ++ SIGALRM : constant := 14; -- alarm clock ++ SIGTERM : constant := 15; -- software termination signal from kill ++ SIGSTKFLT : constant := 16; -- coprocessor stack fault (Linux) ++ SIGCLD : constant := 17; -- alias for SIGCHLD ++ SIGCHLD : constant := 17; -- child status change ++ SIGCONT : constant := 18; -- stopped process has been continued ++ SIGSTOP : constant := 19; -- stop (cannot be caught or ignored) ++ SIGTSTP : constant := 20; -- user stop requested from tty ++ SIGTTIN : constant := 21; -- background tty read attempted ++ SIGTTOU : constant := 22; -- background tty write attempted ++ SIGURG : constant := 23; -- urgent condition on IO channel ++ SIGXCPU : constant := 24; -- CPU time limit exceeded ++ SIGXFSZ : constant := 25; -- filesize limit exceeded ++ SIGVTALRM : constant := 26; -- virtual timer expired ++ SIGPROF : constant := 27; -- profiling timer expired ++ SIGWINCH : constant := 28; -- window size change ++ SIGPOLL : constant := 29; -- pollable event occurred ++ SIGIO : constant := 29; -- I/O now possible (4.2 BSD) ++ SIGPWR : constant := 30; -- power-fail restart ++ SIGSYS : constant := 31; -- bad system call ++ ++ SIGLTHRRES : constant := 0; -- GNU/LinuxThreads restart signal ++ SIGLTHRCAN : constant := 0; -- GNU/LinuxThreads cancel signal ++ SIGLTHRDBG : constant := 0; -- GNU/LinuxThreads debugger signal ++ ++ -- These don't exist for Linux/LoongArch. The constants are present ++ -- so that we can continue to use a-intnam-linux.ads. 
++ SIGLOST : constant := 0; -- File lock lost ++ SIGUNUSED : constant := 0; -- unused signal (GNU/Linux) ++ SIGEMT : constant := 0; -- EMT ++ ++ -- struct_sigaction offsets ++ ++ sa_handler_pos : constant := 0; ++ sa_mask_pos : constant := Standard'Address_Size / 8; ++ sa_flags_pos : constant := 128 + sa_mask_pos; ++ ++ SA_SIGINFO : constant := 16#04#; ++ SA_ONSTACK : constant := 16#08000000#; ++ ++end System.Linux; +diff --git a/gcc/ada/libgnat/system-linux-loongarch.ads b/gcc/ada/libgnat/system-linux-loongarch.ads +new file mode 100644 +index 000000000..312fae0f6 +--- /dev/null ++++ b/gcc/ada/libgnat/system-linux-loongarch.ads +@@ -0,0 +1,147 @@ ++------------------------------------------------------------------------------ ++-- -- ++-- GNAT RUN-TIME COMPONENTS -- ++-- -- ++-- S Y S T E M -- ++-- -- ++-- S p e c -- ++-- (GNU-Linux/LoongArch Version) -- ++-- -- ++-- Copyright (C) 1992-2018, Free Software Foundation, Inc. -- ++-- -- ++-- This specification is derived from the Ada Reference Manual for use with -- ++-- GNAT. The copyright notice above, and the license provisions that follow -- ++-- apply solely to the contents of the part following the private keyword. -- ++-- -- ++-- GNAT is free software; you can redistribute it and/or modify it under -- ++-- terms of the GNU General Public License as published by the Free Soft- -- ++-- ware Foundation; either version 3, or (at your option) any later ver- -- ++-- sion. GNAT is distributed in the hope that it will be useful, but WITH- -- ++-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -- ++-- or FITNESS FOR A PARTICULAR PURPOSE. -- ++-- -- ++-- As a special exception under Section 7 of GPL version 3, you are granted -- ++-- additional permissions described in the GCC Runtime Library Exception, -- ++-- version 3.1, as published by the Free Software Foundation. -- ++-- -- ++-- You should have received a copy of the GNU General Public License and -- ++-- a copy of the GCC Runtime Library Exception along with this program; -- ++-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -- ++-- . -- ++-- -- ++-- GNAT was originally developed by the GNAT team at New York University. -- ++-- Extensive contributions were provided by Ada Core Technologies Inc. -- ++-- -- ++------------------------------------------------------------------------------ ++ ++package System is ++ pragma Pure; ++ -- Note that we take advantage of the implementation permission to make ++ -- this unit Pure instead of Preelaborable; see RM 13.7.1(15). In Ada ++ -- 2005, this is Pure in any case (AI-362). 
++ ++ pragma No_Elaboration_Code_All; ++ -- Allow the use of that restriction in units that WITH this unit ++ ++ type Name is (SYSTEM_NAME_GNAT); ++ System_Name : constant Name := SYSTEM_NAME_GNAT; ++ ++ -- System-Dependent Named Numbers ++ ++ Min_Int : constant := Long_Long_Integer'First; ++ Max_Int : constant := Long_Long_Integer'Last; ++ ++ Max_Binary_Modulus : constant := 2 ** Long_Long_Integer'Size; ++ Max_Nonbinary_Modulus : constant := Integer'Last; ++ ++ Max_Base_Digits : constant := Long_Long_Float'Digits; ++ Max_Digits : constant := Long_Long_Float'Digits; ++ ++ Max_Mantissa : constant := 63; ++ Fine_Delta : constant := 2.0 ** (-Max_Mantissa); ++ ++ Tick : constant := 0.000_001; ++ ++ -- Storage-related Declarations ++ ++ type Address is private; ++ pragma Preelaborable_Initialization (Address); ++ Null_Address : constant Address; ++ ++ Storage_Unit : constant := 8; ++ Word_Size : constant := Standard'Word_Size; ++ Memory_Size : constant := 2 ** Long_Integer'Size; ++ ++ -- Address comparison ++ ++ function "<" (Left, Right : Address) return Boolean; ++ function "<=" (Left, Right : Address) return Boolean; ++ function ">" (Left, Right : Address) return Boolean; ++ function ">=" (Left, Right : Address) return Boolean; ++ function "=" (Left, Right : Address) return Boolean; ++ ++ pragma Import (Intrinsic, "<"); ++ pragma Import (Intrinsic, "<="); ++ pragma Import (Intrinsic, ">"); ++ pragma Import (Intrinsic, ">="); ++ pragma Import (Intrinsic, "="); ++ ++ -- Other System-Dependent Declarations ++ ++ type Bit_Order is (High_Order_First, Low_Order_First); ++ Default_Bit_Order : constant Bit_Order := Low_Order_First; ++ pragma Warnings (Off, Default_Bit_Order); -- kill constant condition warning ++ ++ -- Priority-related Declarations (RM D.1) ++ ++ Max_Priority : constant Positive := 30; ++ Max_Interrupt_Priority : constant Positive := 31; ++ ++ subtype Any_Priority is Integer range 0 .. 31; ++ subtype Priority is Any_Priority range 0 .. 30; ++ subtype Interrupt_Priority is Any_Priority range 31 .. 31; ++ ++ Default_Priority : constant Priority := 15; ++ ++private ++ ++ type Address is mod Memory_Size; ++ Null_Address : constant Address := 0; ++ ++ -------------------------------------- ++ -- System Implementation Parameters -- ++ -------------------------------------- ++ ++ -- These parameters provide information about the target that is used ++ -- by the compiler. They are in the private part of System, where they ++ -- can be accessed using the special circuitry in the Targparm unit ++ -- whose source should be consulted for more detailed descriptions ++ -- of the individual switch values. 
++ ++ Backend_Divide_Checks : constant Boolean := False; ++ Backend_Overflow_Checks : constant Boolean := True; ++ Command_Line_Args : constant Boolean := True; ++ Configurable_Run_Time : constant Boolean := False; ++ Denorm : constant Boolean := True; ++ Duration_32_Bits : constant Boolean := False; ++ Exit_Status_Supported : constant Boolean := True; ++ Fractional_Fixed_Ops : constant Boolean := False; ++ Frontend_Layout : constant Boolean := False; ++ Machine_Overflows : constant Boolean := False; ++ Machine_Rounds : constant Boolean := True; ++ Preallocated_Stacks : constant Boolean := False; ++ Signed_Zeros : constant Boolean := True; ++ Stack_Check_Default : constant Boolean := False; ++ Stack_Check_Probes : constant Boolean := True; ++ Stack_Check_Limits : constant Boolean := False; ++ Support_Aggregates : constant Boolean := True; ++ Support_Composite_Assign : constant Boolean := True; ++ Support_Composite_Compare : constant Boolean := True; ++ Support_Long_Shifts : constant Boolean := True; ++ Always_Compatible_Rep : constant Boolean := False; ++ Suppress_Standard_Library : constant Boolean := False; ++ Use_Ada_Main_Program_Name : constant Boolean := False; ++ Frontend_Exceptions : constant Boolean := False; ++ ZCX_By_Default : constant Boolean := True; ++ ++end System; +diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux +index 5ecf814fa..2559041ae 100644 +--- a/gcc/config/loongarch/t-linux ++++ b/gcc/config/loongarch/t-linux +@@ -36,7 +36,7 @@ gen_mlib_spec = $(if $(word 2,$1),\ + lib_build_self_spec = % +Date: Wed, 13 Dec 2023 11:35:16 +0800 +Subject: [PATCH 20/30] LoongArch: enable __builtin_thread_pointer + +Change-Id: I93e99be82832f44c6a422a9accf8a8ccf776448f +--- + gcc/config/loongarch/loongarch.md | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 445f3773b..d6203ed1a 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -106,6 +106,7 @@ + + (define_constants + [(RETURN_ADDR_REGNUM 1) ++ (TP_REGNUM 2) + (T0_REGNUM 12) + (T1_REGNUM 13) + (S0_REGNUM 23) +@@ -3580,6 +3581,12 @@ + [(set_attr "length" "0") + (set_attr "type" "ghost")]) + ++;; Named pattern for __builtin_thread_pointer. ++(define_expand "get_thread_pointer" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (reg:P TP_REGNUM))] ++ "" ++ {}) + + (define_split + [(match_operand 0 "small_data_pattern")] +-- +2.43.5 + diff --git a/LoongArch-support-static-pie.patch b/LoongArch-support-static-pie.patch new file mode 100644 index 0000000..bef1882 --- /dev/null +++ b/LoongArch-support-static-pie.patch @@ -0,0 +1,30 @@ +From cef2f7e09e617e63cb080e29b0184b534336d8e8 Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: Fri, 21 Jul 2023 15:07:17 +0800 +Subject: [PATCH 11/30] LoongArch: support -static-pie. + +Change-Id: I570c19f12c982d404a1e260539af2fbe02bb5932 +--- + gcc/config/loongarch/gnu-user.h | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h +index 603aed5a2..d8cbfb7e8 100644 +--- a/gcc/config/loongarch/gnu-user.h ++++ b/gcc/config/loongarch/gnu-user.h +@@ -43,8 +43,10 @@ along with GCC; see the file COPYING3. 
If not see + #undef GNU_USER_TARGET_LINK_SPEC + #define GNU_USER_TARGET_LINK_SPEC \ + "%{G*} %{shared} -m " GNU_USER_LINK_EMULATION \ +- "%{!shared: %{static} %{!static: %{rdynamic:-export-dynamic} " \ +- "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}" ++ "%{!shared: %{static} %{!static: %{!static-pie: \ ++ %{rdynamic:-export-dynamic} " \ ++ "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}} \ ++ %{static-pie:-static -pie --no-dynamic-linker -z text}" + + + /* Similar to standard Linux, but adding -ffast-math support. */ +-- +2.43.5 + diff --git a/Modify-MOVE_RATIO-1-using-4-consecutive-scalar.patch b/Modify-MOVE_RATIO-1-using-4-consecutive-scalar.patch new file mode 100644 index 0000000..dbfaff3 --- /dev/null +++ b/Modify-MOVE_RATIO-1-using-4-consecutive-scalar.patch @@ -0,0 +1,61 @@ +From 366f3f141a5770468151805249bf495ecac93b47 Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 07:11:33 +0000 +Subject: [PATCH 15/30] Modify MOVE_RATIO + 1, using 4 consecutive scalar + +ld.d/st.d instead of xvld/xvst to implement 32B BLK move. Especially +effective for SPEC2017 538. + +Signed-off-by: Peng Fan +--- + gcc/config/loongarch/loongarch.h | 2 +- + .../gcc.target/loongarch/blk-move-opt.c | 24 +++++++++++++++++++ + 2 files changed, 25 insertions(+), 1 deletion(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/blk-move-opt.c + +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index 1b26230cb..2f3117fd0 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1215,7 +1215,7 @@ typedef struct { + + #define MOVE_RATIO(speed) \ + (HAVE_movmemsi \ +- ? LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD \ ++ ? LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD + 1 \ + : CLEAR_RATIO (speed) / 2) + + /* For CLEAR_RATIO, when optimizing for size, give a better estimate +diff --git a/gcc/testsuite/gcc.target/loongarch/blk-move-opt.c b/gcc/testsuite/gcc.target/loongarch/blk-move-opt.c +new file mode 100644 +index 000000000..592c3c921 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/blk-move-opt.c +@@ -0,0 +1,24 @@ ++/* Considering vector alignment memory access, use 4 consecutive scalar ++ ld.d/st.d instead of xvld/xvst to implement 32B BLK move. ++ Especially effective for SPEC2017 538. 
*/ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler-not "xvld" } } */ ++/* { dg-final { scan-assembler-not "xvst" } } */ ++ ++typedef struct _SA ++{ ++ long a; ++ long b; ++ long c; ++ long d; ++} SA; ++ ++extern SA aa; ++extern SA foo (SA); ++ ++void ++test (SA s) ++{ ++ foo (s); ++} +-- +2.43.5 + diff --git a/Optimize-float-vector-unpack-operation.patch b/Optimize-float-vector-unpack-operation.patch new file mode 100644 index 0000000..60f25ae --- /dev/null +++ b/Optimize-float-vector-unpack-operation.patch @@ -0,0 +1,253 @@ +From 52d486ecc5eb6c82a70c885709fe6c3d1ff6ea11 Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 07:03:15 +0000 +Subject: [PATCH 09/30] Optimize float vector unpack operation + +Signed-off-by: Peng Fan +--- + gcc/config/loongarch/lasx.md | 49 +++++++++---------- + gcc/config/loongarch/loongarch-protos.h | 2 +- + gcc/config/loongarch/loongarch.c | 34 ++++++------- + gcc/config/loongarch/lsx.md | 30 +++++------- + .../gcc.target/loongarch/vec-unpack.c | 18 +++++++ + 5 files changed, 68 insertions(+), 65 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vec-unpack.c + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index 01a21bd6c..6f191194d 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -547,10 +547,12 @@ + (float_extend:V4DF + (vec_select:V4SF + (match_operand:V8SF 1 "register_operand" "f") +- (match_dup 2))))] ++ (parallel [(const_int 4) (const_int 5) ++ (const_int 6) (const_int 7)]))))] + "ISA_HAS_LASX" + { +- operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, true/*high_p*/); ++ loongarch_expand_fp_vec_unpack(operands, true/*high_p*/); ++ DONE; + }) + + (define_expand "vec_unpacks_lo_v8sf" +@@ -558,10 +560,12 @@ + (float_extend:V4DF + (vec_select:V4SF + (match_operand:V8SF 1 "register_operand" "f") +- (match_dup 2))))] ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 2) (const_int 3)]))))] + "ISA_HAS_LASX" + { +- operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode, false/*high_p*/); ++ loongarch_expand_fp_vec_unpack(operands, false/*high_p*/); ++ DONE; + }) + + (define_expand "vec_unpacks_hi_" +@@ -2696,12 +2700,16 @@ + ;; Define for builtin function. + (define_insn "lasx_xvfcvth_d_s" + [(set (match_operand:V4DF 0 "register_operand" "=f") +- (unspec:V4DF [(match_operand:V8SF 1 "register_operand" "f")] +- UNSPEC_LASX_XVFCVTH))] ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 2) (const_int 3) ++ (const_int 6) (const_int 7)]))))] + "ISA_HAS_LASX" + "xvfcvth.d.s\t%u0,%u1" + [(set_attr "type" "simd_fcvt") +- (set_attr "mode" "V4DF")]) ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "12")]) + + ;; Define for gen insn. + (define_insn "lasx_xvfcvth_d_insn" +@@ -2730,12 +2738,16 @@ + ;; Define for builtin function. + (define_insn "lasx_xvfcvtl_d_s" + [(set (match_operand:V4DF 0 "register_operand" "=f") +- (unspec:V4DF [(match_operand:V8SF 1 "register_operand" "f")] +- UNSPEC_LASX_XVFCVTL))] ++ (float_extend:V4DF ++ (vec_select:V4SF ++ (match_operand:V8SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1) ++ (const_int 4) (const_int 5)]))))] + "ISA_HAS_LASX" + "xvfcvtl.d.s\t%u0,%u1" + [(set_attr "type" "simd_fcvt") +- (set_attr "mode" "V4DF")]) ++ (set_attr "mode" "V4DF") ++ (set_attr "length" "8")]) + + ;; Define for gen insn. 
+ (define_insn "lasx_xvfcvtl_d_insn" +@@ -5189,20 +5201,3 @@ + const0_rtx)); + DONE; + }) +- +-;; merge vec_unpacks_hi_v8sf/vec_unpacks_lo_v8sf +-(define_peephole +- [(set (match_operand:V4DF 0 "register_operand") +- (float_extend:V4DF (vec_select:V4SF +- (match_operand:V8SF 1 "register_operand") +- (parallel [(const_int 0) (const_int 1) +- (const_int 2) (const_int 3)])))) +- (set (match_operand:V4DF 2 "register_operand") +- (float_extend:V4DF (vec_select:V4SF +- (match_operand:V8SF 3 "register_operand") +- (parallel [(const_int 4) (const_int 5) +- (const_int 6) (const_int 7)]))))] +- "ISA_HAS_LASX && rtx_equal_p (operands[1], operands[3])" +-{ +- return "xvpermi.d\t%u2,%u1,0xd8\n\txvfcvtl.d.s\t%u0,%u2\n\txvfcvth.d.s\t%u2,%u2"; +-}) +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 08a9e8dc2..2fb1d1c30 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -115,7 +115,6 @@ extern bool loongarch_const_vector_same_int_p (rtx, machine_mode, HOST_WIDE_INT, + extern bool loongarch_const_vector_shuffle_set_p (rtx, machine_mode); + extern bool loongarch_const_vector_bitimm_set_p (rtx, machine_mode); + extern bool loongarch_const_vector_bitimm_clr_p (rtx, machine_mode); +-extern rtx loongarch_lsx_vec_parallel_const_half (machine_mode, bool); + extern rtx loongarch_gen_const_int_vector (machine_mode, HOST_WIDE_INT); + extern enum reg_class loongarch_secondary_reload_class (enum reg_class, + machine_mode, +@@ -166,6 +165,7 @@ extern void loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs, + extern void loongarch_expand_vector_group_init (rtx, rtx); + extern void loongarch_expand_vector_init (rtx, rtx); + extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool); ++extern void loongarch_expand_fp_vec_unpack (rtx op[2], bool); + extern void loongarch_expand_vec_perm (rtx, rtx, rtx, rtx); + extern void loongarch_expand_vec_perm_1 (rtx[]); + extern void loongarch_expand_vector_extract (rtx, rtx, int); +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index df1638981..15e29abc9 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -9533,6 +9533,21 @@ loongarch_expand_vector_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in) + } + } + ++/* Expand a float vector unpack operation. */ ++ ++void ++loongarch_expand_fp_vec_unpack(rtx operands[2], bool high_p) ++{ ++ rtx tmp = gen_reg_rtx (V8SFmode); ++ ++ /* { 0 1 2 3 4 5 6 7 } -> { 0 1 4 5 2 3 6 7 } */ ++ emit_insn (gen_lasx_xvpermi_d_v8sf (tmp, operands[1], GEN_INT (0xd8))); ++ if (high_p) ++ emit_insn (gen_lasx_xvfcvth_d_s(operands[0], tmp)); ++ else ++ emit_insn (gen_lasx_xvfcvtl_d_s(operands[0], tmp)); ++} ++ + /* Expand an integral vector unpack operation. */ + + void +@@ -9647,25 +9662,6 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p) + gcc_unreachable (); + } + +-/* Construct and return PARALLEL RTX with CONST_INTs for HIGH (high_p == TRUE) +- or LOW (high_p == FALSE) half of a vector for mode MODE. */ +- +-rtx +-loongarch_lsx_vec_parallel_const_half (machine_mode mode, bool high_p) +-{ +- int nunits = GET_MODE_NUNITS (mode); +- rtvec v = rtvec_alloc (nunits / 2); +- int base; +- int i; +- +- base = high_p ? nunits / 2 : 0; +- +- for (i = 0; i < nunits / 2; i++) +- RTVEC_ELT (v, i) = GEN_INT (base + i); +- +- return gen_rtx_PARALLEL (VOIDmode, v); +-} +- + /* A subroutine of loongarch_expand_vec_init, match constant vector elements. 
*/ + + static inline bool +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index 2b1d6f109..69d12fd55 100644 +--- a/gcc/config/loongarch/lsx.md ++++ b/gcc/config/loongarch/lsx.md +@@ -388,27 +388,21 @@ + [(set_attr "type" "simd_permute") + (set_attr "mode" "")]) + +-(define_expand "vec_unpacks_hi_v4sf" ++(define_expand "vec_unpacks_lo_v4sf" + [(set (match_operand:V2DF 0 "register_operand" "=f") +- (float_extend:V2DF +- (vec_select:V2SF +- (match_operand:V4SF 1 "register_operand" "f") +- (match_dup 2))))] +- "ISA_HAS_LSX" +-{ +- operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode, true/*high_p*/); +-}) ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (parallel [(const_int 0) (const_int 1)]))))] ++ "ISA_HAS_LSX") + +-(define_expand "vec_unpacks_lo_v4sf" ++(define_expand "vec_unpacks_hi_v4sf" + [(set (match_operand:V2DF 0 "register_operand" "=f") +- (float_extend:V2DF +- (vec_select:V2SF +- (match_operand:V4SF 1 "register_operand" "f") +- (match_dup 2))))] +- "ISA_HAS_LSX" +-{ +- operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode, false/*high_p*/); +-}) ++ (float_extend:V2DF ++ (vec_select:V2SF ++ (match_operand:V4SF 1 "register_operand" "f") ++ (parallel [(const_int 2) (const_int 3)]))))] ++ "ISA_HAS_LSX") + + (define_expand "vec_unpacks_hi_" + [(match_operand: 0 "register_operand") +diff --git a/gcc/testsuite/gcc.target/loongarch/vec-unpack.c b/gcc/testsuite/gcc.target/loongarch/vec-unpack.c +new file mode 100644 +index 000000000..3e0f5bb92 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vec-unpack.c +@@ -0,0 +1,18 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler-times "xvpermi.d" 2} } */ ++/* { dg-final { scan-assembler-times "xvfcvtl.d.s" 2} } */ ++/* { dg-final { scan-assembler-times "xvfcvth.d.s" 2} } */ ++ ++#define N 16 ++float f[N]; ++double d[N]; ++int n[N]; ++ ++__attribute__((noinline)) void ++foo (void) ++{ ++ int i; ++ for (i = 0; i < N; i++) ++ d[i] = f[i]; ++} +-- +2.43.5 + diff --git a/Optimize-the-implementation-of-multiplication-operat.patch b/Optimize-the-implementation-of-multiplication-operat.patch new file mode 100644 index 0000000..f6f5382 --- /dev/null +++ b/Optimize-the-implementation-of-multiplication-operat.patch @@ -0,0 +1,125 @@ +From 74519e221999f586ac94fd4b1dbab3e3bdc8a97e Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 06:58:18 +0000 +Subject: [PATCH 06/30] Optimize the implementation of multiplication + operations. 
+ +--- + gcc/config/loongarch/loongarch.md | 41 +++++++++++---------- + gcc/testsuite/gcc.target/loongarch/mulh.c | 12 ++++++ + gcc/testsuite/gcc.target/loongarch/mulw_d.c | 12 ++++++ + 3 files changed, 46 insertions(+), 19 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/mulh.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/mulw_d.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index a08c4a62c..cd0702e12 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -699,15 +699,6 @@ + [(set_attr "type" "imul") + (set_attr "mode" "")]) + +-(define_insn "mulsidi3_64bit" +- [(set (match_operand:DI 0 "register_operand" "=r") +- (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r")) +- (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))] +- "TARGET_64BIT" +- "mul.d\t%0,%1,%2" +- [(set_attr "type" "imul") +- (set_attr "mode" "DI")]) +- + (define_insn "*mulsi3_extended" + [(set (match_operand:DI 0 "register_operand" "=r") + (sign_extend:DI +@@ -758,21 +749,33 @@ + (set_attr "mode" "DI")]) + + (define_expand "mulsidi3" +- [(set (match_operand:DI 0 "register_operand" "=r") ++ [(set (match_operand:DI 0 "register_operand") + (mult:DI (any_extend:DI +- (match_operand:SI 1 "register_operand" " r")) ++ (match_operand:SI 1 "register_operand")) + (any_extend:DI +- (match_operand:SI 2 "register_operand" " r"))))] +- "!TARGET_64BIT" ++ (match_operand:SI 2 "register_operand"))))] ++ "" + { +- rtx temp = gen_reg_rtx (SImode); +- emit_insn (gen_mulsi3 (temp, operands[1], operands[2])); +- emit_insn (gen_mulsi3_highpart (loongarch_subword (operands[0], true), ++ if (!TARGET_64BIT) ++ { ++ rtx temp = gen_reg_rtx (SImode); ++ emit_insn (gen_mulsi3 (temp, operands[1], operands[2])); ++ emit_insn (gen_mulsi3_highpart (loongarch_subword (operands[0], true), + operands[1], operands[2])); +- emit_insn (gen_movsi (loongarch_subword (operands[0], false), temp)); +- DONE; ++ emit_insn (gen_movsi (loongarch_subword (operands[0], false), temp)); ++ DONE; ++ } + }) + ++(define_insn "mulsidi3_64bit" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "r")) ++ (any_extend:DI (match_operand:SI 2 "register_operand" "r"))))] ++ "TARGET_64BIT" ++ "mulw.d.w\t%0,%1,%2" ++ [(set_attr "type" "imul") ++ (set_attr "mode" "DI")]) ++ + (define_insn "mulsi3_highpart" + [(set (match_operand:SI 0 "register_operand" "=r") + (truncate:SI +@@ -782,7 +785,7 @@ + (any_extend:DI + (match_operand:SI 2 "register_operand" " r"))) + (const_int 32))))] +- "!TARGET_64BIT" ++ "" + "mulh.w\t%0,%1,%2" + [(set_attr "type" "imul") + (set_attr "mode" "SI")]) +diff --git a/gcc/testsuite/gcc.target/loongarch/mulh.c b/gcc/testsuite/gcc.target/loongarch/mulh.c +new file mode 100644 +index 000000000..08760219b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/mulh.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler "mulh.wu" } } */ ++ ++typedef unsigned int DI __attribute__((mode(DI))); ++typedef unsigned int SI __attribute__((mode(SI))); ++ ++SI ++f (SI x, SI y) ++{ ++ return ((DI) x * y) >> 32; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/mulw_d.c b/gcc/testsuite/gcc.target/loongarch/mulw_d.c +new file mode 100644 +index 000000000..04696adb4 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/mulw_d.c +@@ -0,0 +1,12 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { 
scan-assembler "mulw.d.wu" } } */ ++ ++typedef unsigned int DI __attribute__((mode(DI))); ++typedef unsigned int SI __attribute__((mode(SI))); ++ ++DI ++f (SI x, SI y) ++{ ++ return (DI) x * y; ++} +-- +2.43.5 + diff --git a/asan.c-asan_emit_stack_protection-Use-full-sized-mas.patch b/asan.c-asan_emit_stack_protection-Use-full-sized-mas.patch new file mode 100644 index 0000000..7f471ae --- /dev/null +++ b/asan.c-asan_emit_stack_protection-Use-full-sized-mas.patch @@ -0,0 +1,48 @@ +From a5939ed36a414b669e52cbfbef472bceb7e3968b Mon Sep 17 00:00:00 2001 +From: chenxiaolong +Date: Tue, 30 Jan 2024 09:20:40 +0800 +Subject: [PATCH 27/30] asan.c (asan_emit_stack_protection): Use full-sized + mask to align the base address on 64-bit strict-alignment platforms. + +commit 362432c00db860483058ff609a893151bf8e4b1c +Author: Eric Botcazou +Date: Fri Feb 15 21:40:24 2019 +0000 + + asan.c (asan_emit_stack_protection): Use full-sized mask to align the base address on 64-bit strict-alignment platforms. + + * asan.c (asan_emit_stack_protection): Use full-sized mask to align + the base address on 64-bit strict-alignment platforms. + + From-SVN: r268949 + +Change-Id: I91b37f65b0c3c68aab2106f45c4b25a9f57d5d06 +--- + gcc/asan.c | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +diff --git a/gcc/asan.c b/gcc/asan.c +index 220ecf64d..6adb6e5f5 100644 +--- a/gcc/asan.c ++++ b/gcc/asan.c +@@ -1309,11 +1309,13 @@ asan_emit_stack_protection (rtx base, rtx pbase, unsigned int alignb, + } + /* Align base if target is STRICT_ALIGNMENT. */ + if (STRICT_ALIGNMENT) +- base = expand_binop (Pmode, and_optab, base, +- gen_int_mode (-((GET_MODE_ALIGNMENT (SImode) +- << ASAN_SHADOW_SHIFT) +- / BITS_PER_UNIT), Pmode), NULL_RTX, +- 1, OPTAB_DIRECT); ++ { ++ const HOST_WIDE_INT align ++ = (GET_MODE_ALIGNMENT (SImode) / BITS_PER_UNIT) << ASAN_SHADOW_SHIFT; ++ base = expand_binop (Pmode, and_optab, base, gen_int_mode (-align, Pmode), ++ NULL_RTX, 1, OPTAB_DIRECT); ++ } ++ + + if (use_after_return_class == -1 && pbase) + emit_move_insn (pbase, base); +-- +2.43.5 + diff --git a/gcc-8.3.0-Delete-the-movti-movtf-templates-and-fix-t.patch b/gcc-8.3.0-Delete-the-movti-movtf-templates-and-fix-t.patch new file mode 100644 index 0000000..446ef9e --- /dev/null +++ b/gcc-8.3.0-Delete-the-movti-movtf-templates-and-fix-t.patch @@ -0,0 +1,95 @@ +From 81b226d0015bee741480ca1e85bb265935c3ecfa Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Tue, 12 Mar 2024 11:31:09 +0800 +Subject: [PATCH 28/30] gcc-8.3.0: Delete the movti/movtf templates and fix the + issues caused by these two templates. + +The java virtual machine will give an error when compiling with +gcc8.3.0 -O1. Through debugging, it was found that it was caused +by the two templates of movti/movtf. It is now deleted. 
+ +Change-Id: I99b95824c3d54a07b3357ba93517610d44697039 +--- + gcc/config/loongarch/loongarch.md | 66 ------------------------------- + 1 file changed, 66 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index e85200af8..761132724 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -1940,72 +1940,6 @@ + (set_attr "mode" "DF")]) + + +-;; 128-bit integer moves +- +-(define_expand "movti" +- [(set (match_operand:TI 0) +- (match_operand:TI 1))] +- "TARGET_64BIT" +-{ +- if (loongarch_legitimize_move (TImode, operands[0], operands[1])) +- DONE; +-}) +- +-(define_insn "*movti" +- [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m") +- (match_operand:TI 1 "move_operand" "r,i,m,rJ"))] +- "TARGET_64BIT +- && (register_operand (operands[0], TImode) +- || reg_or_0_operand (operands[1], TImode))" +- { return loongarch_output_move (operands[0], operands[1]); } +- [(set_attr "move_type" "move,const,load,store") +- (set (attr "mode") +- (if_then_else (eq_attr "move_type" "imul") +- (const_string "SI") +- (const_string "TI")))]) +- +-;; 128-bit floating point moves +- +-(define_expand "movtf" +- [(set (match_operand:TF 0) +- (match_operand:TF 1))] +- "TARGET_64BIT" +-{ +- if (loongarch_legitimize_move (TFmode, operands[0], operands[1])) +- DONE; +-}) +- +-;; This pattern handles both hard- and soft-float cases. +-(define_insn "*movtf" +- [(set (match_operand:TF 0 "nonimmediate_operand" "=r,r,m,f,r,f,m") +- (match_operand:TF 1 "move_operand" "rG,m,rG,rG,f,m,f"))] +- "TARGET_64BIT +- && (register_operand (operands[0], TFmode) +- || reg_or_0_operand (operands[1], TFmode))" +- "#" +- [(set_attr "move_type" "move,load,store,mgtf,mftg,fpload,fpstore") +- (set_attr "mode" "TF")]) +- +-(define_split +- [(set (match_operand:MOVE64 0 "nonimmediate_operand") +- (match_operand:MOVE64 1 "move_operand"))] +- "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])" +- [(const_int 0)] +-{ +- loongarch_split_move_insn (operands[0], operands[1], curr_insn); +- DONE; +-}) +- +-(define_split +- [(set (match_operand:MOVE128 0 "nonimmediate_operand") +- (match_operand:MOVE128 1 "move_operand"))] +- "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])" +- [(const_int 0)] +-{ +- loongarch_split_move_insn (operands[0], operands[1], curr_insn); +- DONE; +-}) +- + ;; Emit a doubleword move in which exactly one of the operands is + ;; a floating-point register. We can't just emit two normal moves + ;; because of the constraints imposed by the FPU register model; +-- +2.43.5 + diff --git a/gcc-8.3.0-Export-headers-to-lib-gcc-loongarch64-linu.patch b/gcc-8.3.0-Export-headers-to-lib-gcc-loongarch64-linu.patch new file mode 100644 index 0000000..c64bf4d --- /dev/null +++ b/gcc-8.3.0-Export-headers-to-lib-gcc-loongarch64-linu.patch @@ -0,0 +1,32 @@ +From c9a998ecb4095eee35710fc3650bd1300d255729 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Thu, 29 Jun 2023 16:37:46 +0800 +Subject: [PATCH 07/30] gcc-8.3.0: Export headers to + lib/gcc/loongarch64-linux-gnu/8.3.0/plugin/include/config/loongarch. + +The compilation failure of the annobin application is caused by the incomplete header file exported. +Export header files loongarch-def.h loongarch-tune.h and loongarch-driver.h. 
+ +Change-Id: Id22e737c5669c38ea32bc76ac40022738c1f75e1 +--- + gcc/config/loongarch/t-loongarch | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch +index 9d32fbcf6..e34512114 100644 +--- a/gcc/config/loongarch/t-loongarch ++++ b/gcc/config/loongarch/t-loongarch +@@ -20,6 +20,10 @@ + LA_MULTIARCH_TRIPLET = $(patsubst LA_MULTIARCH_TRIPLET=%,%,$\ + $(filter LA_MULTIARCH_TRIPLET=%,$(tm_defines))) + ++OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h \ ++ $(srcdir)/config/loongarch/loongarch-tune.h \ ++ $(srcdir)/config/loongarch/loongarch-driver.h ++ + # String definition header + LA_STR_H = $(srcdir)/config/loongarch/loongarch-str.h + $(LA_STR_H): s-loongarch-str ; @true +-- +2.43.5 + diff --git a/gcc-8.3.0-Fix-bug-for-simpley.patch b/gcc-8.3.0-Fix-bug-for-simpley.patch new file mode 100644 index 0000000..5b71429 --- /dev/null +++ b/gcc-8.3.0-Fix-bug-for-simpley.patch @@ -0,0 +1,83 @@ +From 3f8a77414d7a678e6dae1633081828ca56f376b2 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Tue, 4 Apr 2023 20:53:22 +0800 +Subject: [PATCH 01/30] gcc-8.3.0: Fix bug for simpley. + +When the code is not modified, the combine pass will consider the following +two rtxs to be equivalent: + + (set (reg:DI 141) + (subreg:DI (leu:SI (reg:DI 242) + (const_int 23)) 0)) +and + (set (subreg:SI (reg:DI 141) 0) + (leu:SI (reg:DI 242) + (const_int 23))) + +As a result only the lower 32 bits are stored when register 141 is stored. +However, it loads 64bit from the memory during use, which causes the program +to run incorrectly. + +Change-Id: I37f6c3ed8882fb2bad61d883f43c4088eaaa993f +--- + gcc/combine.c | 44 ++++++++++++++++++++++---------------------- + 1 file changed, 22 insertions(+), 22 deletions(-) + +diff --git a/gcc/combine.c b/gcc/combine.c +index aa247da72..f53783f51 100644 +--- a/gcc/combine.c ++++ b/gcc/combine.c +@@ -6956,28 +6956,28 @@ simplify_set (rtx x) + be undefined. On machine where it is defined, this transformation is safe + as long as M1 and M2 have the same number of words. */ + +- if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src) +- && !OBJECT_P (SUBREG_REG (src)) +- && (known_equal_after_align_up +- (GET_MODE_SIZE (GET_MODE (src)), +- GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))), +- UNITS_PER_WORD)) +- && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src)) +- && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER +- && !REG_CAN_CHANGE_MODE_P (REGNO (dest), +- GET_MODE (SUBREG_REG (src)), +- GET_MODE (src))) +- && (REG_P (dest) +- || (GET_CODE (dest) == SUBREG +- && REG_P (SUBREG_REG (dest))))) +- { +- SUBST (SET_DEST (x), +- gen_lowpart (GET_MODE (SUBREG_REG (src)), +- dest)); +- SUBST (SET_SRC (x), SUBREG_REG (src)); +- +- src = SET_SRC (x), dest = SET_DEST (x); +- } ++// if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src) ++// && !OBJECT_P (SUBREG_REG (src)) ++// && (known_equal_after_align_up ++// (GET_MODE_SIZE (GET_MODE (src)), ++// GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))), ++// UNITS_PER_WORD)) ++// && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src)) ++// && ! 
(REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER ++// && !REG_CAN_CHANGE_MODE_P (REGNO (dest), ++// GET_MODE (SUBREG_REG (src)), ++// GET_MODE (src))) ++// && (REG_P (dest) ++// || (GET_CODE (dest) == SUBREG ++// && REG_P (SUBREG_REG (dest))))) ++// { ++// SUBST (SET_DEST (x), ++// gen_lowpart (GET_MODE (SUBREG_REG (src)), ++// dest)); ++// SUBST (SET_SRC (x), SUBREG_REG (src)); ++// ++// src = SET_SRC (x), dest = SET_DEST (x); ++// } + + /* If we have (set (cc0) (subreg ...)), we try to remove the subreg + in SRC. */ +-- +2.43.5 + diff --git a/gcc-8.3.0-Fix-bug-when-using-mrecip-rsqrt-which-casu.patch b/gcc-8.3.0-Fix-bug-when-using-mrecip-rsqrt-which-casu.patch new file mode 100644 index 0000000..259bdca --- /dev/null +++ b/gcc-8.3.0-Fix-bug-when-using-mrecip-rsqrt-which-casu.patch @@ -0,0 +1,82 @@ +From e6f71158b63bd88586381d68a0b4c2a51b823dd7 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Fri, 4 Aug 2023 17:16:01 +0800 +Subject: [PATCH 12/30] gcc-8.3.0: Fix bug when using -mrecip=rsqrt, which + casues an internal compiler error. + +Change-Id: I319f3d3df6929432f811da4d188c21eeea42beba +--- + gcc/config/loongarch/lasx.md | 4 ++-- + gcc/config/loongarch/loongarch.md | 4 ++-- + gcc/config/loongarch/lsx.md | 4 ++-- + 3 files changed, 6 insertions(+), 6 deletions(-) + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index 6f191194d..edd6e3204 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -1643,7 +1643,7 @@ + [(set (match_operand:FLASX 0 "register_operand" "=f") + (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] + UNSPEC_RECIPE))] +- "ISA_HAS_LASX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_DIV" ++ "ISA_HAS_LASX && flag_unsafe_math_optimizations" + "xvfrecipe.\t%u0,%u1" + [(set_attr "type" "simd_fdiv") + (set_attr "mode" "")]) +@@ -1671,7 +1671,7 @@ + [(set (match_operand:FLASX 0 "register_operand" "=f") + (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")] + UNSPEC_RSQRTE))] +- "ISA_HAS_LASX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_RSQRT" ++ "ISA_HAS_LASX && flag_unsafe_math_optimizations" + "xvfrsqrte.\t%u0,%u1" + [(set_attr "type" "simd_fdiv") + (set_attr "mode" "")]) +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index cd0702e12..445f3773b 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -875,7 +875,7 @@ + [(set (match_operand:ANYF 0 "register_operand" "=f") + (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] + UNSPEC_RECIPE))] +- "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations && TARGET_RECIP_DIV" ++ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations" + "frecipe.\t%0,%1" + [(set_attr "type" "frsqrte") + (set_attr "mode" "") +@@ -1099,7 +1099,7 @@ + [(set (match_operand:ANYF 0 "register_operand" "=f") + (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")] + UNSPEC_RSQRTE))] +- "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations && TARGET_RECIP_SQRT" ++ "TARGET_HARD_FLOAT && flag_unsafe_math_optimizations" + "frsqrte.\t%0,%1" + [(set_attr "type" "frsqrte") + (set_attr "mode" "")]) +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index 69d12fd55..a6962b271 100644 +--- a/gcc/config/loongarch/lsx.md ++++ b/gcc/config/loongarch/lsx.md +@@ -1532,7 +1532,7 @@ + [(set (match_operand:FLSX 0 "register_operand" "=f") + (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] + UNSPEC_RECIPE))] +- "ISA_HAS_LSX && flag_unsafe_math_optimizations && 
TARGET_RECIP_VEC_DIV" ++ "ISA_HAS_LSX && flag_unsafe_math_optimizations" + "vfrecipe.\t%w0,%w1" + [(set_attr "type" "simd_fdiv") + (set_attr "mode" "")]) +@@ -1559,7 +1559,7 @@ + [(set (match_operand:FLSX 0 "register_operand" "=f") + (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")] + UNSPEC_RSQRTE))] +- "ISA_HAS_LSX && flag_unsafe_math_optimizations && TARGET_RECIP_VEC_SQRT" ++ "ISA_HAS_LSX && flag_unsafe_math_optimizations" + "vfrsqrte.\t%w0,%w1" + [(set_attr "type" "simd_fdiv") + (set_attr "mode" "")]) +-- +2.43.5 + diff --git a/gcc-8.3.0-Fix-the-bug-in-loongarch_emit_stack_tie.patch b/gcc-8.3.0-Fix-the-bug-in-loongarch_emit_stack_tie.patch new file mode 100644 index 0000000..5188219 --- /dev/null +++ b/gcc-8.3.0-Fix-the-bug-in-loongarch_emit_stack_tie.patch @@ -0,0 +1,34 @@ +From 1806d2f053cdba5cfe5404b00eb369baf07103f3 Mon Sep 17 00:00:00 2001 +From: Guo Jie +Date: Tue, 27 Jun 2023 10:03:53 +0800 +Subject: [PATCH 05/30] gcc-8.3.0: Fix the bug in loongarch_emit_stack_tie. + +Which may result in implicit references to $fp when frame_pointer_needed is false, +causing regs_ever_live[$fp] to be true when $fp is not explicitly used, +resulting in $fp being used as the target replacement register in the rnreg pass. + +The bug originates from SPEC2017 541.leela_r(-flto). + +Change-Id: I11316853e230845cefe6f54189f616432b80cca1 +--- + gcc/config/loongarch/loongarch.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index 4fdec9d6c..df1638981 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -1193,7 +1193,9 @@ static void + loongarch_emit_stack_tie (void) + { + emit_insn (PMODE_INSN (gen_stack_tie, +- (stack_pointer_rtx, hard_frame_pointer_rtx))); ++ (stack_pointer_rtx, frame_pointer_needed ++ ? hard_frame_pointer_rtx ++ : stack_pointer_rtx))); + } + + #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP) +-- +2.43.5 + diff --git a/gcc-8.3.0-Lower-the-priority-of-registers-t3-through.patch b/gcc-8.3.0-Lower-the-priority-of-registers-t3-through.patch new file mode 100644 index 0000000..d8644e1 --- /dev/null +++ b/gcc-8.3.0-Lower-the-priority-of-registers-t3-through.patch @@ -0,0 +1,49 @@ +From f5a66febf89339ca5300d91d6ccf322a62000919 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Fri, 4 Aug 2023 17:42:57 +0800 +Subject: [PATCH 14/30] gcc-8.3.0: Lower the priority of registers t3 through + t8. + +gcc/ChangeLog: + * config/loongarch/loongarch.c (loongarch_register_priority): New function. + +Change-Id: I9fe434ab8f279f9034f7f10ea693e2f8c7979a14 +--- + gcc/config/loongarch/loongarch.c | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index 7b5ab23df..0a20850d6 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -6443,6 +6443,18 @@ loongarch_register_move_cost (machine_mode mode, reg_class_t from, + return 0; + } + ++/* Return a register priority for hard reg REGNO. */ ++ ++static int ++loongarch_register_priority (int regno) ++{ ++ /* Lower the priority of registers t3 through t8. */ ++ if (IN_RANGE (regno, GP_REG_FIRST + 15, GP_REG_FIRST + 20)) ++ return 1; ++ ++ return 2; ++} ++ + /* Implement TARGET_MEMORY_MOVE_COST. 
*/ + + static int +@@ -10785,6 +10797,8 @@ loongarch_asan_shadow_offset (void) + #undef TARGET_REGISTER_MOVE_COST + #define TARGET_REGISTER_MOVE_COST loongarch_register_move_cost + #undef TARGET_MEMORY_MOVE_COST ++#undef TARGET_REGISTER_PRIORITY ++#define TARGET_REGISTER_PRIORITY loongarch_register_priority + #define TARGET_MEMORY_MOVE_COST loongarch_memory_move_cost + #undef TARGET_RTX_COSTS + #define TARGET_RTX_COSTS loongarch_rtx_costs +-- +2.43.5 + diff --git a/gcc-8.3.0-Use-the-model-fsched-pressure-algorithm-by.patch b/gcc-8.3.0-Use-the-model-fsched-pressure-algorithm-by.patch new file mode 100644 index 0000000..dbc63de --- /dev/null +++ b/gcc-8.3.0-Use-the-model-fsched-pressure-algorithm-by.patch @@ -0,0 +1,31 @@ +From 1e55f0d0c62f991831db4a27ec8e775974a17be7 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Fri, 4 Aug 2023 17:20:34 +0800 +Subject: [PATCH 13/30] gcc-8.3.0: Use the model -fsched-pressure algorithm by + default. + +Change-Id: I7fea3ec841e9b11ab0fcf6881719951ecdc14760 +--- + gcc/config/loongarch/loongarch.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch.c b/gcc/config/loongarch/loongarch.c +index 15e29abc9..7b5ab23df 100644 +--- a/gcc/config/loongarch/loongarch.c ++++ b/gcc/config/loongarch/loongarch.c +@@ -7046,6 +7046,12 @@ loongarch_cpu_option_override (struct loongarch_target *target, + maybe_set_param_value (PARAM_L2_CACHE_SIZE, + loongarch_cpu_cache[target->cpu_tune].l2d_size, + opts->x_param_values, opts_set->x_param_values); ++ ++ /* Use the 'model' -fsched-pressure algorithm by default. */ ++ maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, ++ SCHED_PRESSURE_MODEL, ++ opts->x_param_values, ++ global_options_set.x_param_values); + } + + static void +-- +2.43.5 + diff --git a/gcc.spec b/gcc.spec index 5dcc5b9..7c9880e 100644 --- a/gcc.spec +++ b/gcc.spec @@ -1,4 +1,4 @@ -%define anolis_release .0.1 +%define anolis_release .0.2 %global DATE 20210514 %global gitrev a3253c88425835d5b339d6998a1110a66ccd8b44 %global gcc_version 8.5.0 @@ -328,6 +328,36 @@ Patch1011: LoongArch-Add-sanitizer-support.patch Patch1012: libitm-Add-LoongArch-support.patch Patch1013: LoongArch-Add-missing-headers.patch Patch1014: Fix-dwarf2cfi-error.patch +Patch1015: gcc-8.3.0-Fix-bug-for-simpley.patch +Patch1016: Add-elf-support.patch +Patch1017: Add-vec_initv32qiv16qi-template-for-2x128bit-grouped.patch +Patch1018: Fix-accuracy-when-using-mrecip-div-which-leads-to-Sp.patch +Patch1019: gcc-8.3.0-Fix-the-bug-in-loongarch_emit_stack_tie.patch +Patch1020: Optimize-the-implementation-of-multiplication-operat.patch +Patch1021: gcc-8.3.0-Export-headers-to-lib-gcc-loongarch64-linu.patch +Patch1022: libffi-Add-loongarch-support.patch +Patch1023: Optimize-float-vector-unpack-operation.patch +Patch1024: LoongArch-add-gnat-ada-compiler-support.patch +Patch1025: LoongArch-support-static-pie.patch +Patch1026: gcc-8.3.0-Fix-bug-when-using-mrecip-rsqrt-which-casu.patch +Patch1027: gcc-8.3.0-Use-the-model-fsched-pressure-algorithm-by.patch +Patch1028: gcc-8.3.0-Lower-the-priority-of-registers-t3-through.patch +Patch1029: Modify-MOVE_RATIO-1-using-4-consecutive-scalar.patch +Patch1030: LoongArch-Optimizations-of-vector-construction.patch +Patch1031: LoongArch-Add-tests-for-SX-and-ASX-vector-instructio.patch +Patch1032: Implement-128-bit-floating-point-built-in-function.patch +Patch1033: LoongArch-Define-macro-CLEAR_INSN_CACHE.patch +Patch1034: LoongArch-enable-__builtin_thread_pointer.patch +Patch1035: 
LoongArch-Use-simplify_gen_subreg-instead-of-gen_rtx.patch +Patch1036: LoongArch-Fix-insn-output-of-vec_concat-templates-fo.patch +Patch1037: Fix-emit-target-register-when-expand-conditional-mov.patch +Patch1038: LoongArch-Use-LSX-for-scalar-FP-rounding-with-explic.patch +Patch1039: LoongArch-Implement-su-sadv16qi-and-su-sadv32qi-stan.patch +Patch1040: LoongArch-Implement-option-save-restore.patch +Patch1041: asan.c-asan_emit_stack_protection-Use-full-sized-mas.patch +Patch1042: gcc-8.3.0-Delete-the-movti-movtf-templates-and-fix-t.patch +Patch1043: Implement-alternate-__intN__-form-of-__intN-type.patch +Patch1044: LoongArch-Remove-bash-syntax-from-config.gcc.patch # On ARM EABI systems, we do want -gnueabi to be part of the # target triple. @@ -1002,6 +1032,36 @@ rm -f gcc/testsuite/go.test/test/chan/goroutines.go %patch1012 -p1 %patch1013 -p1 %patch1014 -p1 +%patch1015 -p1 +%patch1016 -p1 +%patch1017 -p1 +%patch1018 -p1 +%patch1019 -p1 +%patch1020 -p1 +%patch1021 -p1 +%patch1022 -p1 +%patch1023 -p1 +%patch1024 -p1 +%patch1025 -p1 +%patch1026 -p1 +%patch1027 -p1 +%patch1028 -p1 +%patch1029 -p1 +%patch1030 -p1 +%patch1031 -p1 +%patch1032 -p1 +%patch1033 -p1 +%patch1034 -p1 +%patch1035 -p1 +%patch1036 -p1 +%patch1037 -p1 +%patch1038 -p1 +%patch1039 -p1 +%patch1040 -p1 +%patch1041 -p1 +%patch1042 -p1 +%patch1043 -p1 +%patch1044 -p1 %endif %build @@ -3400,6 +3460,9 @@ fi %{ANNOBIN_GCC_PLUGIN_DIR}/gcc-annobin.so.0.0.0 %changelog +* Mon Aug 19 2024 Peng Fan 8.5.0-22.0.2 +- LoongArch: Sync to .vec.39 + * Wed Jul 17 2024 Xue haolin 8.5.0-22.0.1 - Rebrand for Anolis OS. - Separate LoongArch's supported patches.(fanpeng@loongson.cn) diff --git a/libffi-Add-loongarch-support.patch b/libffi-Add-loongarch-support.patch new file mode 100644 index 0000000..becc794 --- /dev/null +++ b/libffi-Add-loongarch-support.patch @@ -0,0 +1,881 @@ +From 0f78bbf2e7d28b63a53c64085a5b18365458f7c3 Mon Sep 17 00:00:00 2001 +From: Peng Fan +Date: Mon, 12 Aug 2024 07:01:02 +0000 +Subject: [PATCH 08/30] libffi: Add loongarch support + +Signed-off-by: Peng Fan +--- + libffi/src/loongarch/ffi.c | 488 +++++++++++++++++++++++++++++++ + libffi/src/loongarch/ffitarget.h | 69 +++++ + libffi/src/loongarch/sysv.S | 288 ++++++++++++++++++ + 3 files changed, 845 insertions(+) + create mode 100644 libffi/src/loongarch/ffi.c + create mode 100644 libffi/src/loongarch/ffitarget.h + create mode 100644 libffi/src/loongarch/sysv.S + +diff --git a/libffi/src/loongarch/ffi.c b/libffi/src/loongarch/ffi.c +new file mode 100644 +index 000000000..fd11f2013 +--- /dev/null ++++ b/libffi/src/loongarch/ffi.c +@@ -0,0 +1,488 @@ ++/* ----------------------------------------------------------------------- ++ ffi.c - Copyright (c) 2015 Michael Knyszek ++ 2015 Andrew Waterman ++ 2018 Stef O'Rear ++ Based on MIPS N32/64 port ++ ++ LOONGARCH Foreign Function Interface ++ ++ Permission is hereby granted, free of charge, to any person obtaining ++ a copy of this software and associated documentation files (the ++ ``Software''), to deal in the Software without restriction, including ++ without limitation the rights to use, copy, modify, merge, publish, ++ distribute, sublicense, and/or sell copies of the Software, and to ++ permit persons to whom the Software is furnished to do so, subject to ++ the following conditions: ++ ++ The above copyright notice and this permission notice shall be included ++ in all copies or substantial portions of the Software. 
++ ++ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ DEALINGS IN THE SOFTWARE. ++ ----------------------------------------------------------------------- */ ++ ++#include ++#include ++ ++#include ++#include ++ ++#define ABI_FLEN 64 ++#define ABI_FLOAT double ++ ++#define NARGREG 8 ++#define STKALIGN 16 ++#define MAXCOPYARG (2 * sizeof(double)) ++ ++typedef struct call_context ++{ ++ ABI_FLOAT fa[8]; ++ size_t a[8]; ++ /* used by the assembly code to in-place construct its own stack frame */ ++ char frame[16]; ++} call_context; ++ ++typedef struct call_builder ++{ ++ call_context *aregs; ++ int used_integer; ++ int used_float; ++ size_t *used_stack; ++} call_builder; ++ ++/* integer (not pointer) less than ABI XLEN */ ++/* FFI_TYPE_INT does not appear to be used */ ++#if __SIZEOF_POINTER__ == 8 ++#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT64) ++#else ++#define IS_INT(type) ((type) >= FFI_TYPE_UINT8 && (type) <= FFI_TYPE_SINT32) ++#endif ++ ++#if ABI_FLEN ++typedef struct { ++ char as_elements, type1, offset2, type2; ++} float_struct_info; ++ ++#if ABI_FLEN >= 64 ++#define IS_FLOAT(type) ((type) >= FFI_TYPE_FLOAT && (type) <= FFI_TYPE_DOUBLE) ++#else ++#define IS_FLOAT(type) ((type) == FFI_TYPE_FLOAT) ++#endif ++ ++static ffi_type **flatten_struct(ffi_type *in, ffi_type **out, ffi_type **out_end) { ++ int i; ++ if (out == out_end) return out; ++ if (in->type != FFI_TYPE_STRUCT) { ++ *(out++) = in; ++ } else { ++ for (i = 0; in->elements[i]; i++) ++ out = flatten_struct(in->elements[i], out, out_end); ++ } ++ return out; ++} ++ ++/* Structs with at most two fields after flattening, one of which is of ++ floating point type, are passed in multiple registers if sufficient ++ registers are available. 
*/ ++static float_struct_info struct_passed_as_elements(call_builder *cb, ffi_type *top) { ++ float_struct_info ret = {0, 0, 0, 0}; ++ ffi_type *fields[3]; ++ int num_floats, num_ints; ++ int num_fields = flatten_struct(top, fields, fields + 3) - fields; ++ ++ if (num_fields == 1) { ++ if (IS_FLOAT(fields[0]->type)) { ++ ret.as_elements = 1; ++ ret.type1 = fields[0]->type; ++ } ++ } else if (num_fields == 2) { ++ num_floats = IS_FLOAT(fields[0]->type) + IS_FLOAT(fields[1]->type); ++ num_ints = IS_INT(fields[0]->type) + IS_INT(fields[1]->type); ++ if (num_floats == 0 || num_floats + num_ints != 2) ++ return ret; ++ if (cb->used_float + num_floats > NARGREG || cb->used_integer + (2 - num_floats) > NARGREG) ++ return ret; ++ if (!IS_FLOAT(fields[0]->type) && !IS_FLOAT(fields[1]->type)) ++ return ret; ++ ++ ret.type1 = fields[0]->type; ++ ret.type2 = fields[1]->type; ++ ret.offset2 = ALIGN(fields[0]->size, fields[1]->alignment); ++ ret.as_elements = 1; ++ } ++ ++ return ret; ++} ++#endif ++ ++/* allocates a single register, float register, or XLEN-sized stack slot to a datum */ ++static void marshal_atom(call_builder *cb, int type, void *data) { ++ size_t value = 0; ++ switch (type) { ++ case FFI_TYPE_UINT8: value = *(uint8_t *)data; break; ++ case FFI_TYPE_SINT8: value = *(int8_t *)data; break; ++ case FFI_TYPE_UINT16: value = *(uint16_t *)data; break; ++ case FFI_TYPE_SINT16: value = *(int16_t *)data; break; ++ /* 32-bit quantities are always sign-extended in the ABI */ ++ case FFI_TYPE_UINT32: value = *(int32_t *)data; break; ++ case FFI_TYPE_SINT32: value = *(int32_t *)data; break; ++#if __SIZEOF_POINTER__ == 8 ++ case FFI_TYPE_UINT64: value = *(uint64_t *)data; break; ++ case FFI_TYPE_SINT64: value = *(int64_t *)data; break; ++#endif ++ case FFI_TYPE_POINTER: value = *(size_t *)data; break; ++ ++ /* float values may be recoded in an implementation-defined way ++ by hardware conforming to 2.1 or earlier, so use asm to ++ reinterpret floats as doubles */ ++#if ABI_FLEN >= 32 ++ case FFI_TYPE_FLOAT: ++ asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(float *)data)); ++ return; ++#endif ++#if ABI_FLEN >= 64 ++ case FFI_TYPE_DOUBLE: ++ asm("" : "=f"(cb->aregs->fa[cb->used_float++]) : "0"(*(double *)data)); ++ return; ++#endif ++ default: FFI_ASSERT(0); break; ++ } ++ ++ if (cb->used_integer == NARGREG) { ++ *cb->used_stack++ = value; ++ } else { ++ cb->aregs->a[cb->used_integer++] = value; ++ } ++} ++ ++static void unmarshal_atom(call_builder *cb, int type, void *data) { ++ size_t value; ++ switch (type) { ++#if ABI_FLEN >= 32 ++ case FFI_TYPE_FLOAT: ++ asm("" : "=f"(*(float *)data) : "0"(cb->aregs->fa[cb->used_float++])); ++ return; ++#endif ++#if ABI_FLEN >= 64 ++ case FFI_TYPE_DOUBLE: ++ asm("" : "=f"(*(double *)data) : "0"(cb->aregs->fa[cb->used_float++])); ++ return; ++#endif ++ } ++ ++ if (cb->used_integer == NARGREG) { ++ value = *cb->used_stack++; ++ } else { ++ value = cb->aregs->a[cb->used_integer++]; ++ } ++ ++ switch (type) { ++ case FFI_TYPE_UINT8: *(uint8_t *)data = value; break; ++ case FFI_TYPE_SINT8: *(uint8_t *)data = value; break; ++ case FFI_TYPE_UINT16: *(uint16_t *)data = value; break; ++ case FFI_TYPE_SINT16: *(uint16_t *)data = value; break; ++ case FFI_TYPE_UINT32: *(uint32_t *)data = value; break; ++ case FFI_TYPE_SINT32: *(uint32_t *)data = value; break; ++#if __SIZEOF_POINTER__ == 8 ++ case FFI_TYPE_UINT64: *(uint64_t *)data = value; break; ++ case FFI_TYPE_SINT64: *(uint64_t *)data = value; break; ++#endif ++ case FFI_TYPE_POINTER: *(size_t *)data = value; 
break; ++ default: FFI_ASSERT(0); break; ++ } ++} ++ ++/* adds an argument to a call, or a not by reference return value */ ++static void marshal(call_builder *cb, ffi_type *type, int var, void *data) { ++ size_t realign[2]; ++ ++#if ABI_FLEN ++ if (!var && type->type == FFI_TYPE_STRUCT) { ++ float_struct_info fsi = struct_passed_as_elements(cb, type); ++ if (fsi.as_elements) { ++ marshal_atom(cb, fsi.type1, data); ++ if (fsi.offset2) ++ marshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); ++ return; ++ } ++ } ++ ++ if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { ++ marshal_atom(cb, type->type, data); ++ return; ++ } ++ ++ double promoted; ++ if (var && type->type == FFI_TYPE_FLOAT) ++ { ++ /* C standard requires promoting float -> double for variable arg */ ++ promoted = *(float *)data; ++ type = &ffi_type_double; ++ data = &promoted; ++ } ++#endif ++ ++ if (type->size > 2 * __SIZEOF_POINTER__) { ++ /* pass by reference */ ++ marshal_atom(cb, FFI_TYPE_POINTER, &data); ++ } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { ++ marshal_atom(cb, type->type, data); ++ } else { ++ /* overlong integers, soft-float floats, and structs without special ++ float handling are treated identically from this point on */ ++ ++ /* variadics are aligned even in registers */ ++ if (type->alignment > __SIZEOF_POINTER__) { ++ if (var) ++ cb->used_integer = ALIGN(cb->used_integer, 2); ++ cb->used_stack = (size_t *)ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); ++ } ++ ++ memcpy(realign, data, type->size); ++ if (type->size > 0) ++ marshal_atom(cb, FFI_TYPE_POINTER, realign); ++ if (type->size > __SIZEOF_POINTER__) ++ marshal_atom(cb, FFI_TYPE_POINTER, realign + 1); ++ } ++} ++ ++/* for arguments passed by reference returns the pointer, otherwise the arg is copied (up to MAXCOPYARG bytes) */ ++static void *unmarshal(call_builder *cb, ffi_type *type, int var, void *data) { ++ size_t realign[2]; ++ void *pointer; ++ ++#if ABI_FLEN ++ if (!var && type->type == FFI_TYPE_STRUCT) { ++ float_struct_info fsi = struct_passed_as_elements(cb, type); ++ if (fsi.as_elements) { ++ unmarshal_atom(cb, fsi.type1, data); ++ if (fsi.offset2) ++ unmarshal_atom(cb, fsi.type2, ((char*)data) + fsi.offset2); ++ return data; ++ } ++ } ++ ++ if (!var && cb->used_float < NARGREG && IS_FLOAT(type->type)) { ++ unmarshal_atom(cb, type->type, data); ++ return data; ++ } ++ ++ if (var && type->type == FFI_TYPE_FLOAT) ++ { ++ int m = cb->used_integer; ++ void *promoted = m < NARGREG ? 
cb->aregs->a + m:cb->used_stack + m - NARGREG + 1; ++ *(float*)promoted = *(double *)promoted; ++ } ++#endif ++ ++ if (type->size > 2 * __SIZEOF_POINTER__) { ++ /* pass by reference */ ++ unmarshal_atom(cb, FFI_TYPE_POINTER, (char*)&pointer); ++ return pointer; ++ } else if (IS_INT(type->type) || type->type == FFI_TYPE_POINTER) { ++ unmarshal_atom(cb, type->type, data); ++ return data; ++ } else { ++ /* overlong integers, soft-float floats, and structs without special ++ float handling are treated identically from this point on */ ++ ++ /* variadics are aligned even in registers */ ++ if (type->alignment > __SIZEOF_POINTER__) { ++ if (var) ++ cb->used_integer = ALIGN(cb->used_integer, 2); ++ cb->used_stack = (size_t *)ALIGN(cb->used_stack, 2*__SIZEOF_POINTER__); ++ } ++ ++ if (type->size > 0) ++ unmarshal_atom(cb, FFI_TYPE_POINTER, realign); ++ if (type->size > __SIZEOF_POINTER__) ++ unmarshal_atom(cb, FFI_TYPE_POINTER, realign + 1); ++ memcpy(data, realign, type->size); ++ return data; ++ } ++} ++ ++static int passed_by_ref(call_builder *cb, ffi_type *type, int var) { ++#if ABI_FLEN ++ if (!var && type->type == FFI_TYPE_STRUCT) { ++ float_struct_info fsi = struct_passed_as_elements(cb, type); ++ if (fsi.as_elements) return 0; ++ } ++#endif ++ ++ return type->size > 2 * __SIZEOF_POINTER__; ++} ++ ++/* Perform machine dependent cif processing */ ++ffi_status ffi_prep_cif_machdep(ffi_cif *cif) { ++ cif->loongarch_nfixedargs = cif->nargs; ++ return FFI_OK; ++} ++ ++/* Perform machine dependent cif processing when we have a variadic function */ ++ ++ffi_status ffi_prep_cif_machdep_var(ffi_cif *cif, unsigned int nfixedargs, unsigned int ntotalargs) { ++ cif->loongarch_nfixedargs = nfixedargs; ++ return FFI_OK; ++} ++ ++/* Low level routine for calling functions */ ++extern void ffi_call_asm (void *stack, struct call_context *regs, ++ void (*fn) (void), void *closure) FFI_HIDDEN; ++ ++static void ++ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, ++ void *closure) ++{ ++ /* this is a conservative estimate, assuming a complex return value and ++ that all remaining arguments are long long / __int128 */ ++ size_t arg_bytes = cif->nargs <= 3 ? 
0 : ++ ALIGN(2 * sizeof(size_t) * (cif->nargs - 3), STKALIGN); ++ size_t rval_bytes = 0; ++ if (rvalue == NULL && cif->rtype->size > 2*__SIZEOF_POINTER__) ++ rval_bytes = ALIGN(cif->rtype->size, STKALIGN); ++ size_t alloc_size = arg_bytes + rval_bytes + sizeof(call_context); ++ ++ /* the assembly code will deallocate all stack data at lower addresses ++ than the argument region, so we need to allocate the frame and the ++ return value after the arguments in a single allocation */ ++ size_t alloc_base; ++ /* Argument region must be 16-byte aligned */ ++ if (_Alignof(max_align_t) >= STKALIGN) { ++ /* since sizeof long double is normally 16, the compiler will ++ guarantee alloca alignment to at least that much */ ++ alloc_base = (size_t)alloca(alloc_size); ++ } else { ++ alloc_base = ALIGN(alloca(alloc_size + STKALIGN - 1), STKALIGN); ++ } ++ ++ if (rval_bytes) ++ rvalue = (void*)(alloc_base + arg_bytes); ++ ++ call_builder cb; ++ cb.used_float = cb.used_integer = 0; ++ cb.aregs = (call_context*)(alloc_base + arg_bytes + rval_bytes); ++ cb.used_stack = (void*)alloc_base; ++ ++ int return_by_ref = passed_by_ref(&cb, cif->rtype, 0); ++ if (return_by_ref) ++ marshal(&cb, &ffi_type_pointer, 0, &rvalue); ++ ++ int i; ++ for (i = 0; i < cif->nargs; i++) ++ marshal(&cb, cif->arg_types[i], i >= cif->loongarch_nfixedargs, avalue[i]); ++ ++ ffi_call_asm ((void *) alloc_base, cb.aregs, fn, closure); ++ ++ cb.used_float = cb.used_integer = 0; ++ if (!return_by_ref && rvalue) ++ unmarshal(&cb, cif->rtype, 0, rvalue); ++} ++ ++void ++ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) ++{ ++ ffi_call_int(cif, fn, rvalue, avalue, NULL); ++} ++ ++void ++ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, ++ void **avalue, void *closure) ++{ ++ ffi_call_int(cif, fn, rvalue, avalue, closure); ++} ++ ++extern void ffi_closure_asm(void) FFI_HIDDEN; ++ ++ffi_status ffi_prep_closure_loc(ffi_closure *closure, ffi_cif *cif, void (*fun)(ffi_cif*,void*,void**,void*), void *user_data, void *codeloc) ++{ ++ uint32_t *tramp = (uint32_t *) &closure->tramp[0]; ++ uint64_t fn = (uint64_t) (uintptr_t) ffi_closure_asm; ++ ++ if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) ++ return FFI_BAD_ABI; ++ ++ /* we will call ffi_closure_inner with codeloc, not closure, but as long ++ as the memory is readable it should work */ ++ ++ tramp[0] = 0x1800000c; /* pcaddi $t0, 0 (i.e. $t0 <- tramp) */ ++#ifdef _ABILP64 ++ tramp[1] = 0x28c0418d; /* ld.d $t1, $t0, 16 */ ++#endif ++ tramp[2] = 0x4c0001a0; /* jirl $zero, $t1, 0 */ ++ tramp[3] = 0x03400000; /* nop */ ++ tramp[4] = fn; ++ tramp[5] = fn >> 32; ++ ++ closure->cif = cif; ++ closure->fun = fun; ++ closure->user_data = user_data; ++ ++ __builtin___clear_cache(codeloc, codeloc + FFI_TRAMPOLINE_SIZE); ++ ++ return FFI_OK; ++} ++ ++extern void ffi_go_closure_asm (void) FFI_HIDDEN; ++ ++ffi_status ++ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, ++ void (*fun) (ffi_cif *, void *, void **, void *)) ++{ ++ if (cif->abi <= FFI_FIRST_ABI || cif->abi >= FFI_LAST_ABI) ++ return FFI_BAD_ABI; ++ ++ closure->tramp = (void *) ffi_go_closure_asm; ++ closure->cif = cif; ++ closure->fun = fun; ++ ++ return FFI_OK; ++} ++ ++/* Called by the assembly code with aregs pointing to saved argument registers ++ and stack pointing to the stacked arguments. Return values passed in ++ registers will be reloaded from aregs. 
*/ ++void FFI_HIDDEN ++ffi_closure_inner (ffi_cif *cif, ++ void (*fun) (ffi_cif *, void *, void **, void *), ++ void *user_data, ++ size_t *stack, call_context *aregs) ++{ ++ void **avalue = alloca(cif->nargs * sizeof(void*)); ++ /* storage for arguments which will be copied by unmarshal(). We could ++ theoretically avoid the copies in many cases and use at most 128 bytes ++ of memory, but allocating disjoint storage for each argument is ++ simpler. */ ++ char *astorage = alloca(cif->nargs * MAXCOPYARG); ++ void *rvalue; ++ call_builder cb; ++ int return_by_ref; ++ int i; ++ ++ cb.aregs = aregs; ++ cb.used_integer = cb.used_float = 0; ++ cb.used_stack = stack; ++ ++ return_by_ref = passed_by_ref(&cb, cif->rtype, 0); ++ if (return_by_ref) ++ unmarshal(&cb, &ffi_type_pointer, 0, &rvalue); ++ else ++ rvalue = alloca(cif->rtype->size); ++ ++ for (i = 0; i < cif->nargs; i++) ++ avalue[i] = unmarshal(&cb, cif->arg_types[i], ++ i >= cif->loongarch_nfixedargs, astorage + i*MAXCOPYARG); ++ ++ fun (cif, rvalue, avalue, user_data); ++ ++ if (!return_by_ref && cif->rtype->type != FFI_TYPE_VOID) { ++ cb.used_integer = cb.used_float = 0; ++ marshal(&cb, cif->rtype, 0, rvalue); ++ } ++} +diff --git a/libffi/src/loongarch/ffitarget.h b/libffi/src/loongarch/ffitarget.h +new file mode 100644 +index 000000000..b13b3af5e +--- /dev/null ++++ b/libffi/src/loongarch/ffitarget.h +@@ -0,0 +1,69 @@ ++/* -----------------------------------------------------------------*-C-*- ++ ffitarget.h - 2014 Michael Knyszek ++ ++ Target configuration macros for LOONGARCH. ++ ++ Permission is hereby granted, free of charge, to any person obtaining ++ a copy of this software and associated documentation files (the ++ ``Software''), to deal in the Software without restriction, including ++ without limitation the rights to use, copy, modify, merge, publish, ++ distribute, sublicense, and/or sell copies of the Software, and to ++ permit persons to whom the Software is furnished to do so, subject to ++ the following conditions: ++ ++ The above copyright notice and this permission notice shall be included ++ in all copies or substantial portions of the Software. ++ ++ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ DEALINGS IN THE SOFTWARE. ++ ++ ----------------------------------------------------------------------- */ ++ ++#ifndef LIBFFI_TARGET_H ++#define LIBFFI_TARGET_H ++ ++#ifndef LIBFFI_H ++#error "Please do not include ffitarget.h directly into your source. Use ffi.h instead." ++#endif ++ ++#ifndef __loongarch__ ++#error "libffi was configured for a LOONGARCH target but this does not appear to be a LOONGARCH compiler." 
++#endif ++ ++#ifndef LIBFFI_ASM ++ ++typedef unsigned long ffi_arg; ++typedef signed long ffi_sarg; ++ ++/* FFI_UNUSED_NN and loongarch_unused are to maintain ABI compatibility with a ++ distributed Berkeley patch from 2014, and can be removed at SONAME bump */ ++typedef enum ffi_abi { ++ FFI_FIRST_ABI = 0, ++ FFI_LP64, ++ FFI_UNUSED_1, ++ FFI_UNUSED_2, ++ FFI_UNUSED_3, ++ FFI_LAST_ABI, ++ ++ FFI_DEFAULT_ABI = FFI_LP64 ++} ffi_abi; ++ ++#endif /* LIBFFI_ASM */ ++ ++/* ---- Definitions for closures ----------------------------------------- */ ++ ++#define FFI_CLOSURES 1 ++#define FFI_GO_CLOSURES 1 ++#define FFI_TRAMPOLINE_SIZE 24 ++#define FFI_NATIVE_RAW_API 0 ++#define FFI_EXTRA_CIF_FIELDS unsigned loongarch_nfixedargs; unsigned loongarch_unused; ++#define FFI_TARGET_SPECIFIC_VARIADIC ++//#define FFI_TARGET_HAS_COMPLEX_TYPE 1 ++#endif ++ +diff --git a/libffi/src/loongarch/sysv.S b/libffi/src/loongarch/sysv.S +new file mode 100644 +index 000000000..91027ca8f +--- /dev/null ++++ b/libffi/src/loongarch/sysv.S +@@ -0,0 +1,288 @@ ++/* ----------------------------------------------------------------------- ++ ffi.c - Copyright (c) 2015 Michael Knyszek ++ 2015 Andrew Waterman ++ 2018 Stef O'Rear ++ ++ LOONGARCH Foreign Function Interface ++ ++ Permission is hereby granted, free of charge, to any person obtaining ++ a copy of this software and associated documentation files (the ++ ``Software''), to deal in the Software without restriction, including ++ without limitation the rights to use, copy, modify, merge, publish, ++ distribute, sublicense, and/or sell copies of the Software, and to ++ permit persons to whom the Software is furnished to do so, subject to ++ the following conditions: ++ ++ The above copyright notice and this permission notice shall be included ++ in all copies or substantial portions of the Software. ++ ++ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ DEALINGS IN THE SOFTWARE. ++ ----------------------------------------------------------------------- */ ++ ++#define LIBFFI_ASM ++#include ++#include ++ ++/* Define aliases so that we can handle all ABIs uniformly */ ++ ++#if __SIZEOF_POINTER__ == 8 ++#define PTRS 8 ++#define LARG ld.d ++#define SARG st.d ++#else ++#define PTRS 4 ++#define LARG ld.w ++#define SARG st.w ++#endif ++ ++#ifdef __loongarch_hard_float ++# if defined __loongarch_single_float ++# define FLT float ++# define FLEN 4 ++# define FLD fld.w ++# define FST fst.w ++# error "need check" ++# else ++# define FLT double ++# define FLEN 8 ++# define FLARG fld.d ++# define FSARG fst.d ++# endif ++#else ++# define FLEN 0 ++# error "need check" ++#endif ++ ++#define FLTS 8 ++ ++ ++ .text ++ .globl ffi_call_asm ++ .type ffi_call_asm, @function ++ .hidden ffi_call_asm ++/* ++ struct call_context { ++ floatreg fa[8]; ++ intreg a[8]; ++ intreg pad[rv32 ? 2 : 0]; ++ intreg save_fp, save_ra; ++ } ++ void ffi_call_asm (size_t *stackargs, struct call_context *regargs, ++ void (*fn) (void), void *closure); ++*/ ++ ++#define FRAME_LEN (8 * FLTS + 8 * PTRS + 8 * 2) ++ ++ffi_call_asm: ++ .cfi_startproc ++ ++ /* ++ We are NOT going to set up an ordinary stack frame. 
In order to pass ++ the stacked args to the called function, we adjust our stack pointer to ++ a0, which is in the _caller's_ alloca area. We establish our own stack ++ frame at the end of the call_context. ++ ++ Anything below the arguments will be freed at this point, although we ++ preserve the call_context so that it can be read back in the caller. ++ */ ++ ++ .cfi_def_cfa 5, FRAME_LEN # interim CFA based on a1 ++ SARG $fp, $a1, FRAME_LEN - 2*PTRS ++ .cfi_offset 22, -2*PTRS ++ SARG $ra, $a1, FRAME_LEN - 1*PTRS ++ .cfi_offset 1, -1*PTRS ++ ++ addi.d $fp, $a1, FRAME_LEN ++ move $sp, $a0 ++ .cfi_def_cfa 22, 0 # our frame is fully set up ++ ++ # Load arguments ++ move $t1, $a2 ++ move $t2, $a3 ++ ++ FLARG $fa0, $fp, -FRAME_LEN+0*FLTS ++ FLARG $fa1, $fp, -FRAME_LEN+1*FLTS ++ FLARG $fa2, $fp, -FRAME_LEN+2*FLTS ++ FLARG $fa3, $fp, -FRAME_LEN+3*FLTS ++ FLARG $fa4, $fp, -FRAME_LEN+4*FLTS ++ FLARG $fa5, $fp, -FRAME_LEN+5*FLTS ++ FLARG $fa6, $fp, -FRAME_LEN+6*FLTS ++ FLARG $fa7, $fp, -FRAME_LEN+7*FLTS ++ ++ LARG $a0, $fp, -FRAME_LEN+8*FLTS+0*PTRS ++ LARG $a1, $fp, -FRAME_LEN+8*FLTS+1*PTRS ++ LARG $a2, $fp, -FRAME_LEN+8*FLTS+2*PTRS ++ LARG $a3, $fp, -FRAME_LEN+8*FLTS+3*PTRS ++ LARG $a4, $fp, -FRAME_LEN+8*FLTS+4*PTRS ++ LARG $a5, $fp, -FRAME_LEN+8*FLTS+5*PTRS ++ LARG $a6, $fp, -FRAME_LEN+8*FLTS+6*PTRS ++ LARG $a7, $fp, -FRAME_LEN+8*FLTS+7*PTRS ++ ++ /* Call */ ++ jirl $ra,$t1,0 ++ ++ /* Save return values - only a0/a1 (fa0/fa1) are used */ ++ FSARG $fa0, $fp, -FRAME_LEN+0*FLTS ++ FSARG $fa1, $fp, -FRAME_LEN+1*FLTS ++ ++ SARG $a0, $fp, -FRAME_LEN+8*FLTS+0*PTRS ++ SARG $a1, $fp, -FRAME_LEN+8*FLTS+1*PTRS ++ ++ /* Restore and return */ ++ addi.d $sp, $fp, -FRAME_LEN ++ .cfi_def_cfa 3, FRAME_LEN ++ LARG $ra, $fp, -1*PTRS ++ .cfi_restore 1 ++ LARG $fp, $fp, -2*PTRS ++ .cfi_restore 22 ++ jirl $r0, $ra, 0 ++ .cfi_endproc ++ .size ffi_call_asm, .-ffi_call_asm ++ ++ ++/* ++ ffi_closure_asm. Expects address of the passed-in ffi_closure in t1. 
++ void ffi_closure_inner (ffi_cif *cif, ++ void (*fun) (ffi_cif *, void *, void **, void *), ++ void *user_data, ++ size_t *stackargs, struct call_context *regargs) ++*/ ++ ++ .globl ffi_closure_asm ++ .hidden ffi_closure_asm ++ .type ffi_closure_asm, @function ++ffi_closure_asm: ++ .cfi_startproc ++ ++ addi.d $sp, $sp, -FRAME_LEN ++ .cfi_def_cfa_offset FRAME_LEN ++ ++ /* make a frame */ ++ SARG $fp, $sp, FRAME_LEN - 2*PTRS ++ .cfi_offset 22, -2*PTRS ++ SARG $ra, $sp, FRAME_LEN - 1*PTRS ++ .cfi_offset 1, -1*PTRS ++ addi.d $fp, $sp, FRAME_LEN ++ ++ /* save arguments */ ++ FSARG $fa0, $sp, 0*FLTS ++ FSARG $fa1, $sp, 1*FLTS ++ FSARG $fa2, $sp, 2*FLTS ++ FSARG $fa3, $sp, 3*FLTS ++ FSARG $fa4, $sp, 4*FLTS ++ FSARG $fa5, $sp, 5*FLTS ++ FSARG $fa6, $sp, 6*FLTS ++ FSARG $fa7, $sp, 7*FLTS ++ ++ SARG $a0, $sp, 8*FLTS+0*PTRS ++ SARG $a1, $sp, 8*FLTS+1*PTRS ++ SARG $a2, $sp, 8*FLTS+2*PTRS ++ SARG $a3, $sp, 8*FLTS+3*PTRS ++ SARG $a4, $sp, 8*FLTS+4*PTRS ++ SARG $a5, $sp, 8*FLTS+5*PTRS ++ SARG $a6, $sp, 8*FLTS+6*PTRS ++ SARG $a7, $sp, 8*FLTS+7*PTRS ++ ++ /* enter C */ ++ LARG $a0, $t0, FFI_TRAMPOLINE_SIZE+0*PTRS ++ LARG $a1, $t0, FFI_TRAMPOLINE_SIZE+1*PTRS ++ LARG $a2, $t0, FFI_TRAMPOLINE_SIZE+2*PTRS ++ addi.d $a3, $sp, FRAME_LEN ++ move $a4, $sp ++ ++ bl ffi_closure_inner ++ ++ /* return values */ ++ FLARG $fa0, $sp, 0*FLTS ++ FLARG $fa1, $sp, 1*FLTS ++ ++ LARG $a0, $sp, 8*FLTS+0*PTRS ++ LARG $a1, $sp, 8*FLTS+1*PTRS ++ ++ /* restore and return */ ++ LARG $ra, $sp, FRAME_LEN-1*PTRS ++ .cfi_restore 1 ++ LARG $fp, $sp, FRAME_LEN-2*PTRS ++ .cfi_restore 22 ++ addi.d $sp, $sp, FRAME_LEN ++ .cfi_def_cfa_offset 0 ++ jirl $r0, $ra, 0 ++ .cfi_endproc ++ .size ffi_closure_asm, .-ffi_closure_asm ++ ++/* ++ ffi_go_closure_asm. Expects address of the passed-in ffi_go_closure in t2. ++ void ffi_closure_inner (ffi_cif *cif, ++ void (*fun) (ffi_cif *, void *, void **, void *), ++ void *user_data, ++ size_t *stackargs, struct call_context *regargs) ++*/ ++ ++ .globl ffi_go_closure_asm ++ .hidden ffi_go_closure_asm ++ .type ffi_go_closure_asm, @function ++ffi_go_closure_asm: ++ .cfi_startproc ++ ++ addi.d $sp, $sp, -FRAME_LEN ++ .cfi_def_cfa_offset FRAME_LEN ++ ++ /* make a frame */ ++ SARG $fp, $sp, FRAME_LEN - 2*PTRS ++ .cfi_offset 22, -2*PTRS ++ SARG $ra, $sp, FRAME_LEN - 1*PTRS ++ .cfi_offset 1, -1*PTRS ++ addi.d $fp, $sp, FRAME_LEN ++ ++ /* save arguments */ ++ FSARG $fa0, $sp, 0*FLTS ++ FSARG $fa1, $sp, 1*FLTS ++ FSARG $fa2, $sp, 2*FLTS ++ FSARG $fa3, $sp, 3*FLTS ++ FSARG $fa4, $sp, 4*FLTS ++ FSARG $fa5, $sp, 5*FLTS ++ FSARG $fa6, $sp, 6*FLTS ++ FSARG $fa7, $sp, 7*FLTS ++ ++ SARG $a0, $sp, 8*FLTS+0*PTRS ++ SARG $a1, $sp, 8*FLTS+1*PTRS ++ SARG $a2, $sp, 8*FLTS+2*PTRS ++ SARG $a3, $sp, 8*FLTS+3*PTRS ++ SARG $a4, $sp, 8*FLTS+4*PTRS ++ SARG $a5, $sp, 8*FLTS+5*PTRS ++ SARG $a6, $sp, 8*FLTS+6*PTRS ++ SARG $a7, $sp, 8*FLTS+7*PTRS ++ ++ /* enter C */ ++ LARG $a0, $t2, 1*PTRS ++ LARG $a1, $t2, 2*PTRS ++ move $a2, $t2 ++ addi.d $a3, $sp, FRAME_LEN ++ move $a4, $sp ++ ++ bl ffi_closure_inner ++ ++ /* return values */ ++ FLARG $fa0, $sp, 0*FLTS ++ FLARG $fa1, $sp, 1*FLTS ++ ++ LARG $a0, $sp, 8*FLTS+0*PTRS ++ LARG $a1, $sp, 8*FLTS+1*PTRS ++ ++ /* restore and return */ ++ LARG $ra, $sp, FRAME_LEN-1*PTRS ++ .cfi_restore 1 ++ LARG $fp, $sp, FRAME_LEN-2*PTRS ++ .cfi_restore 22 ++ addi.d $sp, $sp, FRAME_LEN ++ .cfi_def_cfa_offset 0 ++ jirl $r0, $ra, 0 ++ .cfi_endproc ++ .size ffi_go_closure_asm, .-ffi_go_closure_asm +-- +2.43.5 + -- Gitee